code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = RobertaTokenizer
A__ : Any = RobertaTokenizerFast
A__ : Optional[Any] = True
A__ : Dict = {'''cls_token''': '''<s>'''}
def snake_case_ ( self : Any ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowercase : List[str] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__lowercase : Optional[int] = dict(zip(_snake_case , range(len(_snake_case ) ) ) )
__lowercase : Optional[Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowercase : int = {'''unk_token''': '''<unk>'''}
__lowercase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_snake_case ) )
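# Note on the fixture above: `vocab.json` maps each BPE token to an integer id and
# `merges.txt` lists one merge rule per line ("a b" means the pair may be merged
# into "ab"); "\u0120" is the byte-level BPE marker GPT-2/RoBERTa use for a
# leading space ("Ġ").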
def snake_case_ ( self : str , **_snake_case : List[Any] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_snake_case )
def snake_case_ ( self : Union[str, Any] , **_snake_case : str ):
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_snake_case )
def snake_case_ ( self : str , _snake_case : str ):
__lowercase : List[Any] = '''lower newer'''
__lowercase : Optional[Any] = '''lower newer'''
return input_text, output_text
def snake_case_ ( self : List[str] ):
__lowercase : List[str] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowercase : str = '''lower newer'''
__lowercase : List[Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__lowercase : str = tokenizer.tokenize(_snake_case ) # , add_prefix_space=True)
self.assertListEqual(_snake_case , _snake_case )
__lowercase : Tuple = tokens + [tokenizer.unk_token]
__lowercase : List[Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case )
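# Illustrative sketch of what the test above exercises (a minimal sketch, assuming
# the toy vocab/merges files written in setUp):
#   tok = RobertaTokenizer(vocab_file, merges_file, unk_token="<unk>")
#   tok.tokenize("lower newer")
#   # -> ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'], as asserted above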
def snake_case_ ( self : Any ):
__lowercase : Dict = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=_snake_case ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=_snake_case ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def snake_case_ ( self : Any ):
__lowercase : Optional[int] = self.tokenizer_class.from_pretrained('''roberta-base''' )
__lowercase : Union[str, Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=_snake_case )
__lowercase : Any = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_snake_case )
__lowercase : str = tokenizer.encode(
'''sequence builders''' , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
__lowercase : Optional[Any] = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
__lowercase : str = tokenizer.build_inputs_with_special_tokens(_snake_case )
__lowercase : int = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def snake_case_ ( self : Dict ):
__lowercase : Dict = self.get_tokenizer()
__lowercase : List[str] = '''Encode this sequence.'''
__lowercase : Optional[int] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__lowercase : Optional[int] = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
__lowercase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_snake_case , _snake_case )
__lowercase : Tuple = tokenizer.encode(_snake_case , add_special_tokens=_snake_case , add_prefix_space=_snake_case )
__lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_snake_case , _snake_case )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__lowercase : Any = tokenizer.encode(_snake_case , add_special_tokens=_snake_case )
__lowercase : int = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_snake_case , _snake_case )
# Testing spaces after special tokens
__lowercase : int = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case )} ) # mask token has a left space
__lowercase : str = tokenizer.convert_tokens_to_ids(_snake_case )
__lowercase : Union[str, Any] = '''Encode <mask> sequence'''
__lowercase : str = '''Encode <mask>sequence'''
__lowercase : List[Any] = tokenizer.encode(_snake_case )
__lowercase : Tuple = encoded.index(_snake_case )
__lowercase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_snake_case , _snake_case )
__lowercase : Union[str, Any] = tokenizer.encode(_snake_case )
__lowercase : int = encoded.index(_snake_case )
__lowercase : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_snake_case , _snake_case )
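# Sketch of the behaviour tested above: AddedToken("<mask>", lstrip=True) absorbs
# the space to the *left* of <mask>, so in "Encode <mask> sequence" the next word
# still carries the "\u0120" space marker, while in "Encode <mask>sequence" it
# does not.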
def snake_case_ ( self : List[Any] ):
pass
def snake_case_ ( self : Union[str, Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
__lowercase : List[Any] = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case )
__lowercase : str = '''A, <mask> AllenNLP sentence.'''
__lowercase : int = tokenizer_r.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case )
__lowercase : Optional[int] = tokenizer_p.encode_plus(_snake_case , add_special_tokens=_snake_case , return_token_type_ids=_snake_case )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so its mean over the sequence length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__lowercase : int = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__lowercase : List[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
_snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
_snake_case , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def snake_case_ ( self : Optional[int] ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__lowercase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
__lowercase : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__lowercase : Tuple = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , _snake_case )
self.assertEqual(post_processor_state['''add_prefix_space'''] , _snake_case )
self.assertEqual(post_processor_state['''trim_offsets'''] , _snake_case )
def snake_case_ ( self : Any ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowercase : Tuple = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__lowercase : List[str] = F'{text_of_1_token} {text_of_1_token}'
__lowercase : Tuple = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
__lowercase : Optional[int] = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , )
__lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
__lowercase : int = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ) + 1, len(_snake_case ) + 1 + len(_snake_case )) , )
__lowercase : List[Any] = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
__lowercase : List[Any] = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , )
__lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
__lowercase : Any = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_snake_case ), len(_snake_case ) + 1 + len(_snake_case )) , )
__lowercase : Union[str, Any] = F' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
__lowercase : Optional[Any] = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ) + 1, 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
__lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
__lowercase : Tuple = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
__lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
_snake_case , use_fast=_snake_case , add_prefix_space=_snake_case , trim_offsets=_snake_case )
__lowercase : str = tokenizer_r(_snake_case , return_offsets_mapping=_snake_case , add_special_tokens=_snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_snake_case ), 1 + len(_snake_case ) + 1 + len(_snake_case )) , )
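# Summary of the cases above (informal): `add_prefix_space` controls whether a space
# is prepended to the text before tokenization, while `trim_offsets` controls whether
# the returned character offsets exclude that leading whitespace; with
# trim_offsets=True the second word's offset starts after the space (len + 1), with
# trim_offsets=False it starts at the space itself (len).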
| 156 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_squeezebert_fast'] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_squeezebert'] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
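# Illustrative sketch (not the actual transformers implementation) of the lazy-import
# pattern used above: submodules are imported only when one of their names is first
# accessed, roughly a module-level __getattr__ (PEP 562):
#
#   import importlib
#   def __getattr__(name):
#       for submodule, names in _import_structure.items():
#           if name in names:
#               return getattr(importlib.import_module("." + submodule, __name__), name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")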
| 263 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : Optional[Any] = {
't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
lowerCAmelCase_ = """t5"""
lowerCAmelCase_ = ["""past_key_values"""]
lowerCAmelCase_ = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self , A_=32128 , A_=512 , A_=64 , A_=2048 , A_=6 , A_=None , A_=8 , A_=32 , A_=128 , A_=0.1 , A_=1e-6 , A_=1.0 , A_="relu" , A_=True , A_=True , A_=0 , A_=1 , **A_ , )-> Dict:
'''simple docstring'''
UpperCamelCase = vocab_size
UpperCamelCase = d_model
UpperCamelCase = d_kv
UpperCamelCase = d_ff
UpperCamelCase = num_layers
UpperCamelCase = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCamelCase = num_heads
UpperCamelCase = relative_attention_num_buckets
UpperCamelCase = relative_attention_max_distance
UpperCamelCase = dropout_rate
UpperCamelCase = layer_norm_epsilon
UpperCamelCase = initializer_factor
UpperCamelCase = feed_forward_proj
UpperCamelCase = use_cache
act_info = self.feed_forward_proj.split('-' )
UpperCamelCase = act_info[-1]
UpperCamelCase = act_info[0] == 'gated'
if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
raise ValueError(
F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
UpperCamelCase = 'gelu_new'
super().__init__(
pad_token_id=A_ , eos_token_id=A_ , is_encoder_decoder=A_ , **A_ , )
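# Worked examples of the `feed_forward_proj` parsing above:
#   "relu"       -> act_info == ["relu"];          activation "relu",     not gated
#   "gated-gelu" -> act_info == ["gated", "gelu"]; activation "gelu_new", gated
# (the "gated-gelu" -> "gelu_new" remapping is the backwards-compatibility branch)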
class SCREAMING_SNAKE_CASE__ ( OnnxSeq2SeqConfigWithPast ):
@property
def UpperCAmelCase_ ( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
UpperCamelCase = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
UpperCamelCase = 'past_encoder_sequence + sequence'
UpperCamelCase = {0: 'batch'}
UpperCamelCase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
UpperCamelCase = {0: 'batch', 1: 'decoder_sequence'}
UpperCamelCase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(A_ , direction='inputs' )
return common_inputs
@property
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
return 13
| 354 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
lowerCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class SCREAMING_SNAKE_CASE__ ( nn.Module):
def __init__( self , args )-> int:
'''simple docstring'''
super().__init__()
model = torchvision.models.resnet152(pretrained=True )
modules = list(model.children() )[:-2]
self.model = nn.Sequential(*modules )
self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
out = self.pool(self.model(A_ ) )
out = torch.flatten(out , start_dim=2 )
out = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
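# Worked example of POOLING_BREAKDOWN above: with args.num_image_embeds == 3 the
# pool is AdaptiveAvgPool2d((3, 1)), so a B x 2048 x H x W feature map becomes
# B x 2048 x 3 x 1; flatten + transpose then yield B x 3 x 2048, i.e. three image
# "tokens" of width 2048 per sample.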
class SCREAMING_SNAKE_CASE__ ( Dataset ):
def __init__( self , A_ , A_ , A_ , A_ , A_ )-> Dict:
'''simple docstring'''
UpperCamelCase = [json.loads(A_ ) for l in open(A_ )]
UpperCamelCase = os.path.dirname(A_ )
UpperCamelCase = tokenizer
UpperCamelCase = labels
UpperCamelCase = len(A_ )
UpperCamelCase = max_seq_length
UpperCamelCase = transforms
def __len__( self )-> Union[str, Any]:
'''simple docstring'''
return len(self.data )
def __getitem__( self , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=A_ ) )
UpperCamelCase , UpperCamelCase , UpperCamelCase = sentence[0], sentence[1:-1], sentence[-1]
UpperCamelCase = sentence[: self.max_seq_length]
UpperCamelCase = torch.zeros(self.n_classes )
UpperCamelCase = 1
UpperCamelCase = Image.open(os.path.join(self.data_dir , self.data[index]['img'] ) ).convert('RGB' )
UpperCamelCase = self.transforms(A_ )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def UpperCAmelCase_ ( self )-> List[Any]:
'''simple docstring'''
UpperCamelCase = Counter()
for row in self.data:
label_freqs.update(row['label'] )
return label_freqs
def A_( A : Union[str, Any]):
UpperCamelCase = [len(row['sentence']) for row in batch]
UpperCamelCase , UpperCamelCase = len(A), max(A)
UpperCamelCase = torch.zeros(A , A , dtype=torch.long)
UpperCamelCase = torch.zeros(A , A , dtype=torch.long)
for i_batch, (input_row, length) in enumerate(zip(A , A)):
UpperCamelCase = input_row['sentence']
UpperCamelCase = 1
UpperCamelCase = torch.stack([row['image'] for row in batch])
UpperCamelCase = torch.stack([row['label'] for row in batch])
UpperCamelCase = torch.stack([row['image_start_token'] for row in batch])
UpperCamelCase = torch.stack([row['image_end_token'] for row in batch])
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def A_( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def A_( ):
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
])
| 251 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class UpperCAmelCase_ ( BaseOutput ):
'''simple docstring'''
__A : torch.FloatTensor
class UpperCAmelCase_ ( ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
def __init__( self , __A = 3 , __A = 3 , __A = ("DownEncoderBlock2D",) , __A = ("UpDecoderBlock2D",) , __A = (64,) , __A = 1 , __A = "silu" , __A = 3 , __A = 32 , __A = 256 , __A = 32 , __A = None , __A = 0.18215 , __A = "group" , ):
"""simple docstring"""
super().__init__()
# pass init params to Encoder
lowerCamelCase : Dict = Encoder(
in_channels=__A , out_channels=__A , down_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , double_z=__A , )
lowerCamelCase : Union[str, Any] = vq_embed_dim if vq_embed_dim is not None else latent_channels
lowerCamelCase : Any = nn.Conv2d(__A , __A , 1 )
lowerCamelCase : Dict = VectorQuantizer(__A , __A , beta=0.25 , remap=__A , sane_index_shape=__A )
lowerCamelCase : Optional[int] = nn.Conv2d(__A , __A , 1 )
# pass init params to Decoder
lowerCamelCase : Optional[Any] = Decoder(
in_channels=__A , out_channels=__A , up_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , norm_type=__A , )
@apply_forward_hook
def _snake_case ( self , __A , __A = True ):
"""simple docstring"""
lowerCamelCase : Dict = self.encoder(__A )
lowerCamelCase : Union[str, Any] = self.quant_conv(__A )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__A )
@apply_forward_hook
def _snake_case ( self , __A , __A = False , __A = True ):
"""simple docstring"""
if not force_not_quantize:
lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = self.quantize(__A )
else:
lowerCamelCase : Dict = h
lowerCamelCase : Tuple = self.post_quant_conv(__A )
lowerCamelCase : Any = self.decoder(__A , quant if self.config.norm_type == "spatial" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__A )
def _snake_case ( self , __A , __A = True ):
"""simple docstring"""
lowerCamelCase : int = sample
lowerCamelCase : Optional[Any] = self.encode(__A ).latents
lowerCamelCase : List[str] = self.decode(__A ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__A )
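# Usage sketch for the VQ autoencoder above (shapes illustrative, names assumed):
#   model = <this VQModel-style class>()
#   x = torch.randn(1, 3, 256, 256)
#   latents = model.encode(x).latents        # encoder features after quant_conv
#   recon = model.decode(latents).sample     # quantize -> post_quant_conv -> decoder
#   out = model(x).sample                    # full round trip, same shape as x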
| 283 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __A , ):
"""simple docstring"""
lowerCamelCase : str = parent
lowerCamelCase : Union[str, Any] = 13
lowerCamelCase : Optional[Any] = 7
lowerCamelCase : List[str] = True
lowerCamelCase : Optional[int] = True
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : List[Any] = True
lowerCamelCase : Tuple = True
lowerCamelCase : Any = False
lowerCamelCase : int = False
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = 2
lowerCamelCase : Dict = 99
lowerCamelCase : Tuple = 0
lowerCamelCase : Any = 32
lowerCamelCase : List[Any] = 2
lowerCamelCase : Tuple = 4
lowerCamelCase : List[str] = 0.1
lowerCamelCase : int = 0.1
lowerCamelCase : int = 512
lowerCamelCase : List[Any] = 16
lowerCamelCase : Any = 2
lowerCamelCase : Any = 0.02
lowerCamelCase : List[str] = 3
lowerCamelCase : Tuple = 4
lowerCamelCase : int = "last"
lowerCamelCase : int = True
lowerCamelCase : Dict = None
lowerCamelCase : Tuple = 0
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.float32 )
lowerCamelCase : Tuple = None
if self.use_input_lengths:
lowerCamelCase : Optional[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCamelCase : str = None
if self.use_token_type_ids:
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCamelCase : Dict = None
lowerCamelCase : Dict = None
lowerCamelCase : Tuple = None
if self.use_labels:
lowerCamelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : int = ids_tensor([self.batch_size] , 2 , dtype=tf.float32 )
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : List[Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = TFFlaubertModel(config=__A )
lowerCamelCase : Any = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
lowerCamelCase : Dict = model(__A )
lowerCamelCase : Any = [input_ids, input_mask]
lowerCamelCase : Tuple = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : int = TFFlaubertWithLMHeadModel(__A )
lowerCamelCase : List[str] = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
lowerCamelCase : int = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = TFFlaubertForQuestionAnsweringSimple(__A )
lowerCamelCase : Optional[int] = {"input_ids": input_ids, "lengths": input_lengths}
lowerCamelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : Optional[int] = TFFlaubertForSequenceClassification(__A )
lowerCamelCase : str = {"input_ids": input_ids, "lengths": input_lengths}
lowerCamelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : Tuple = self.num_labels
lowerCamelCase : Optional[Any] = TFFlaubertForTokenClassification(config=__A )
lowerCamelCase : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCamelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : Any = self.num_choices
lowerCamelCase : Optional[Any] = TFFlaubertForMultipleChoice(config=__A )
lowerCamelCase : Tuple = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase : int = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase : List[str] = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase : Optional[int] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
lowerCamelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = self.prepare_config_and_inputs()
(
(
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) ,
) : Optional[Any] = config_and_inputs
lowerCamelCase : List[Any] = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"langs": token_type_ids,
"lengths": input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__A : str = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
__A : Dict = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): check whether language generation is also applicable to other models
__A : Any = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
__A : List[str] = False
__A : List[str] = False
def _snake_case ( self , __A , __A , __A , __A , __A ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = TFFlaubertModelTester(self )
lowerCamelCase : Optional[int] = ConfigTester(self , config_class=__A , emb_dim=37 )
def _snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__A )
@slow
def _snake_case ( self ):
"""simple docstring"""
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : int = TFFlaubertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[int] = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
lowerCamelCase : str = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.int32 , ) # "J'aime flaubert !"
lowerCamelCase : Dict = model(__A )[0]
lowerCamelCase : List[str] = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , __A )
# compare the actual values for a slice.
lowerCamelCase : Tuple = tf.convert_to_tensor(
[
[
[-1.8768773, -1.566555, 0.27072418],
[-1.6920038, -0.5873505, 1.9329599],
[-2.9563985, -1.6993835, 1.7972052],
]
] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 283 | 1 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
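# `get_duration` (imported above from the local `utils` module) is assumed here to be
# a timing decorator; a minimal sketch of that assumption, not the actual helper:
#
#   import timeit
#   def get_duration(func):
#       def wrapper(*args, **kwargs):
#           start = timeit.default_timer()
#           func(*args, **kwargs)
#           return timeit.default_timer() - start
#       return wrapper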
@get_duration
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
for i in range(SCREAMING_SNAKE_CASE__ ):
lowercase : Optional[Any] = dataset[i]
@get_duration
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
for i in range(0 , len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ):
lowercase : Any = dataset[i : i + batch_size]
@get_duration
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
with dataset.formatted_as(type=SCREAMING_SNAKE_CASE__ ):
for i in range(SCREAMING_SNAKE_CASE__ ):
lowercase : int = dataset[i]
@get_duration
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]:
with dataset.formatted_as(type=SCREAMING_SNAKE_CASE__ ):
for i in range(0 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = dataset[i : i + batch_size]
def _snake_case( ) -> Any:
lowercase : List[Any] = {"""num examples""": SPEED_TEST_N_EXAMPLES}
lowercase : Dict = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_000}),
]
lowercase : Any = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("""generating dataset""" )
lowercase : Optional[int] = datasets.Features(
{"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} )
lowercase : str = generate_example_dataset(
os.path.join(SCREAMING_SNAKE_CASE__ , """dataset.arrow""" ) , SCREAMING_SNAKE_CASE__ , num_examples=SCREAMING_SNAKE_CASE__ , seq_shapes={"""list""": (100,)} , )
print("""first set of iterations""" )
for func, kwargs in functions:
print(func.__name__ , str(SCREAMING_SNAKE_CASE__ ) )
lowercase : Dict = func(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
print("""shuffling dataset""" )
lowercase : str = dataset.shuffle()
print("""Second set of iterations (after shuffling""" )
for func, kwargs in functions_shuffled:
print("""shuffled """ , func.__name__ , str(SCREAMING_SNAKE_CASE__ ) )
lowercase : str = func(
SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , """wb""" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 285 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowercase : Tuple = """<<<<<<< This should probably be modified because it mentions: """
lowercase : Any = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R"""tfds\.core""", R"""datasets"""),
(R"""tf\.io\.gfile\.GFile""", R"""open"""),
(R"""tf\.([\w\d]+)""", R"""datasets.Value('\1')"""),
(R"""tfds\.features\.Text\(\)""", R"""datasets.Value('string')"""),
(R"""tfds\.features\.Text\(""", R"""datasets.Value('string'),"""),
(R"""features\s*=\s*tfds.features.FeaturesDict\(""", R"""features=datasets.Features("""),
(R"""tfds\.features\.FeaturesDict\(""", R"""dict("""),
(R"""The TensorFlow Datasets Authors""", R"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(R"""tfds\.""", R"""datasets."""),
(R"""dl_manager\.manual_dir""", R"""self.config.data_dir"""),
(R"""self\.builder_config""", R"""self.config"""),
]
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> List[Any]:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class __snake_case ( BaseDatasetsCLICommand ):
@staticmethod
def _SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
train_parser = parser.add_parser(
"""convert""" ,help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" ,)
train_parser.add_argument(
"""--tfds_path""" ,type=snake_case ,required=snake_case ,help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" ,)
train_parser.add_argument(
"""--datasets_directory""" ,type=snake_case ,required=snake_case ,help="""Path to the HuggingFace Datasets folder.""" )
train_parser.set_defaults(func=snake_case )
def __init__( self ,tfds_path ,datasets_directory ,*args ):
'''simple docstring'''
self._logger = get_logger("""datasets-cli/converting""" )
self._tfds_path = tfds_path
self._datasets_directory = datasets_directory
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
abs_tfds_path = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
abs_tfds_path = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
abs_datasets_path = os.path.abspath(self._datasets_directory )
self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}" )
lowercase : List[Any] = []
lowercase : Optional[int] = []
lowercase : Dict = {}
if os.path.isdir(self._tfds_path ):
file_names = os.listdir(abs_tfds_path )
else:
file_names = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f"Looking at file {f_name}" )
lowercase : List[Any] = os.path.join(snake_case ,snake_case )
lowercase : List[str] = os.path.join(snake_case ,snake_case )
if not os.path.isfile(snake_case ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(snake_case ,encoding="""utf-8""" ) as f:
lowercase : str = f.readlines()
lowercase : Union[str, Any] = []
lowercase : Optional[Any] = False
lowercase : Optional[Any] = False
lowercase : Optional[int] = []
for line in lines:
lowercase : int = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowercase : Union[str, Any] = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
lowercase : List[Any] = """"""
continue
elif "from absl import logging" in out_line:
lowercase : Optional[int] = """from datasets import logging\n"""
elif "getLogger" in out_line:
lowercase : Any = out_line.replace("""getLogger""" ,"""get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
lowercase : Optional[Any] = True
lowercase : Optional[Any] = list(filter(lambda snake_case : e in out_line ,snake_case ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(snake_case ) + """\n""" )
out_lines.append(snake_case )
out_lines.append(snake_case )
continue
else:
for pattern, replacement in TO_CONVERT:
lowercase : Union[str, Any] = re.sub(snake_case ,snake_case ,snake_case )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
lowercase : Dict = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" ,snake_case )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
lowercase : Optional[int] = """from . import """ + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f"Error converting {out_line.strip()}" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowercase : Any = True
out_lines.append(snake_case )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowercase : Union[str, Any] = f_name.replace(""".py""" ,"""""" )
lowercase : Optional[Any] = os.path.join(snake_case ,snake_case )
lowercase : List[str] = os.path.join(snake_case ,snake_case )
os.makedirs(snake_case ,exist_ok=snake_case )
self._logger.info(f"Adding directory {output_dir}" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(snake_case )
if needs_manual_update:
with_manual_update.append(snake_case )
with open(snake_case ,"""w""" ,encoding="""utf-8""" ) as f:
f.writelines(snake_case )
self._logger.info(f"Converted in {output_file}" )
for utils_file in utils_files:
try:
lowercase : Optional[int] = os.path.basename(snake_case )
lowercase : int = imports_to_builder_map[f_name.replace(""".py""" ,"""""" )]
self._logger.info(f"Moving {dest_folder} to {utils_file}" )
shutil.copy(snake_case ,snake_case )
except KeyError:
self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually." )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'." )
| 285 | 1 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 68 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__lowerCAmelCase : List[str] ='true'
def _UpperCamelCase ( lowercase__ , lowercase__=82 , lowercase__=16 ):
set_seed(42 )
__SCREAMING_SNAKE_CASE : Optional[int] = RegressionModel()
__SCREAMING_SNAKE_CASE : Optional[int] = deepcopy(lowercase__ )
__SCREAMING_SNAKE_CASE : Any = RegressionDataset(length=lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = DataLoader(lowercase__ , batch_size=lowercase__ )
model.to(accelerator.device )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare(lowercase__ , lowercase__ )
return model, ddp_model, dataloader
def _UpperCamelCase ( lowercase__ , lowercase__=False ):
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
__SCREAMING_SNAKE_CASE : str = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(lowercase__ ):
__SCREAMING_SNAKE_CASE : Dict = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
with accelerator.main_process_first():
__SCREAMING_SNAKE_CASE : Tuple = dataset.map(
lowercase__ , batched=lowercase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
__SCREAMING_SNAKE_CASE : List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowercase__ ):
if use_longest:
return tokenizer.pad(lowercase__ , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(lowercase__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return DataLoader(lowercase__ , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=16 )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : str = Accelerator(dispatch_batches=lowercase__ , split_batches=lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = get_dataloader(lowercase__ , not dispatch_batches )
__SCREAMING_SNAKE_CASE : List[str] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = accelerator.prepare(lowercase__ , lowercase__ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[str] = []
for batch in dataloader:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = batch.values()
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Dict = model(lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = [], []
for logit, targ in logits_and_targets:
logits.append(lowercase__ )
targs.append(lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = torch.cat(lowercase__ ), torch.cat(lowercase__ )
return logits, targs
def _UpperCamelCase ( lowercase__ , lowercase__=82 , lowercase__=False , lowercase__=False , lowercase__=16 ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = get_basic_setup(lowercase__ , lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = generate_predictions(lowercase__ , lowercase__ , lowercase__ )
assert (
len(lowercase__ ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowercase__ )}'''
def _UpperCamelCase ( lowercase__ = False , lowercase__ = False ):
__SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = get_mrpc_setup(lowercase__ , lowercase__ )
# First do baseline
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = setup['''no''']
model.to(lowercase__ )
model.eval()
for batch in dataloader:
batch.to(lowercase__ )
with torch.inference_mode():
__SCREAMING_SNAKE_CASE : Dict = model(**lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowercase__ , references=batch['''labels'''] )
__SCREAMING_SNAKE_CASE : int = metric.compute()
# Then do distributed
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
__SCREAMING_SNAKE_CASE : int = model(**lowercase__ )
__SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 )
__SCREAMING_SNAKE_CASE : Any = batch['''labels''']
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowercase__ , references=lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
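# Note on the mechanism above (informal): `accelerator.gather_for_metrics` gathers
# predictions and references from all processes and drops the samples that were
# duplicated to pad the final uneven batch, which is why the distributed metrics can
# be compared exactly against the single-process baseline.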
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Dict = Accelerator(split_batches=lowercase__ , dispatch_batches=lowercase__ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(lowercase__ , lowercase__ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__SCREAMING_SNAKE_CASE : List[Any] = Accelerator(split_batches=lowercase__ , dispatch_batches=lowercase__ )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(lowercase__ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
__SCREAMING_SNAKE_CASE : Tuple = Accelerator()
test_torch_metrics(lowercase__ , 512 )
accelerator.state._reset_state()
def _UpperCamelCase ( lowercase__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 9 | 0 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class lowerCamelCase ( BaseOutput ):
UpperCAmelCase__ : Dict = 42
UpperCAmelCase__ : List[str] = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 371 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCamelCase ( PretrainedConfig ):
UpperCAmelCase__ : str = "realm"
def __init__(self : Optional[int] , _A : Optional[Any]=3_0_5_2_2 , _A : Tuple=7_6_8 , _A : List[str]=1_2_8 , _A : Optional[Any]=1_2 , _A : Dict=1_2 , _A : Tuple=8 , _A : Dict=3_0_7_2 , _A : Union[str, Any]="gelu_new" , _A : Any=0.1 , _A : int=0.1 , _A : Union[str, Any]=5_1_2 , _A : List[str]=2 , _A : Any=0.02 , _A : int=1E-12 , _A : Tuple=2_5_6 , _A : Optional[Any]=1_0 , _A : Any=1E-3 , _A : int=5 , _A : int=3_2_0 , _A : Dict=1_3_3_5_3_7_1_8 , _A : Any=5_0_0_0 , _A : Union[str, Any]=1 , _A : Dict=0 , _A : int=2 , **_A : Union[str, Any] , ) -> Optional[Any]:
super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
# Common config
snake_case = vocab_size
snake_case = max_position_embeddings
snake_case = hidden_size
snake_case = retriever_proj_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = num_candidates
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = initializer_range
snake_case = type_vocab_size
snake_case = layer_norm_eps
# Reader config
snake_case = span_hidden_size
snake_case = max_span_width
snake_case = reader_layer_norm_eps
snake_case = reader_beam_size
snake_case = reader_seq_len
# Retrieval config
snake_case = num_block_records
snake_case = searcher_beam_size
| 137 | 0 |
import datasets
from .evaluate import evaluate
UpperCamelCase__ = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
UpperCamelCase__ = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
UpperCamelCase__ = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
    'exact_match': Exact match (the normalized answer exactly matches the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
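
# Hedged sketch (added; not the official scoring script imported above): the core
# of SQuAD scoring. Answers are normalized (lowercased, punctuation/articles/extra
# whitespace stripped) before exact-match comparison and token-level F1. Helper
# names here are illustrative only.
import re
import string


def _normalize_answer(text):
    text = "".join(ch for ch in text.lower() if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())


def _token_f1(prediction, gold):
    pred_tokens = _normalize_answer(prediction).split()
    gold_tokens = _normalize_answer(gold).split()
    common = sum(min(pred_tokens.count(t), gold_tokens.count(t)) for t in set(pred_tokens))
    if common == 0:
        return 0.0
    precision, recall = common / len(pred_tokens), common / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)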
| 92 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(config_path):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(config_path, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(config_path, str(exc)))
    return config
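
# Illustrative helper (added; hypothetical name, not in the original script). It
# documents the dotted-key layout that flatten_yaml_as_dict produces and that the
# getattr(config, "model.classification. ...") lookups below rely on.
def _flatten_example():
    nested = {"model": {"classification": {"name": "mobilevit_v2"}}}

    def flatten(d, prefix=""):
        out = {}
        for k, v in d.items():
            key = f"{prefix}.{k}" if prefix else k
            out.update(flatten(v, key) if isinstance(v, dict) else {key: v})
        return out

    assert flatten(nested) == {"model.classification.name": "mobilevit_v2"}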
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
                if f"layer_{i}.1.global_rep.{j+1}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                    )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
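
# Illustrative example (added; not part of the original script): how the mapping
# behaves on a toy checkpoint key. The key below is hypothetical.
def _rename_example():
    toy = {"encoder.conv_1.block.conv.weight": None}
    mapping = create_rename_keys(toy)
    # "encoder." is stripped, ".block." collapsed, ".conv." -> ".convolution.",
    # and "conv_1." picks up the "mobilevitv2.conv_stem." prefix.
    assert mapping == [("encoder.conv_1.block.conv.weight", "mobilevitv2.conv_stem.convolution.weight")]
    return mapping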
def remove_unused_keys(state_dict):
    """Remove unused (auxiliary-head) keys from the checkpoint, in place."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""",
default="""imagenet1k_256""",
type=str,
help=(
"""Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """
"""
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
"""
),
choices=[
"""imagenet1k_256""",
"""imagenet1k_384""",
"""imagenet21k_to_1k_256""",
"""imagenet21k_to_1k_384""",
"""ade20k_deeplabv3""",
"""voc_deeplabv3""",
],
)
parser.add_argument(
"""--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 128 | 0 |
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
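
# Hedged sketch (added; not diffusers' actual implementation): a deprecate()
# helper of this shape typically emits a FutureWarning until the stated removal
# version ships. All names in this sketch are assumptions for illustration.
import warnings


def _deprecate_sketch(name, removal_version, message, standard_warn=True, stacklevel=2):
    # Real helpers also compare the installed version against `removal_version`
    # and raise once the deadline has passed; this sketch only warns.
    prefix = f"`{name}` is deprecated and will be removed in version {removal_version}. " if standard_warn else ""
    warnings.warn(prefix + message, FutureWarning, stacklevel=stacklevel)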
| 369 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
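
# Illustrative usage (added; guarded so importing the module stays side-effect
# free). Whether OnnxConfig accepts a bare model config positionally is an
# assumption here; the point is that the ONNX config declares one dynamically
# shaped "pixel_values" input plus a validation tolerance and opset.
if __name__ == "__main__":
    onnx_config = SegformerOnnxConfig(SegformerConfig())
    print(onnx_config.inputs)               # OrderedDict([('pixel_values', {0: 'batch', ...})])
    print(onnx_config.atol_for_validation)  # 0.0001
    print(onnx_config.default_onnx_opset)   # 12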
| 29 | 0 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}
    CONVERSION_MAPPING = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
    DECODER_CONVERSION_MAPPING = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
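
# Illustrative example (added; the key below is hypothetical): the re.sub step
# above turns T5X-style "layers_N" names into HF-style dotted layer indices.
def _layer_rename_example():
    renamed = re.sub(r"layers_(\d+)", r"layer.\1", "encoder.layers_3.mlp.wi.kernel")
    assert renamed == "encoder.layer.3.mlp.wi.kernel"
    return renamed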
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096

    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
UpperCAmelCase : Tuple = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 136 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X2_0000 and cp <= 0X2_A6DF) #
or (cp >= 0X2_A700 and cp <= 0X2_B73F) #
or (cp >= 0X2_B740 and cp <= 0X2_B81F) #
or (cp >= 0X2_B820 and cp <= 0X2_CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2_F800 and cp <= 0X2_FA1F) #
): #
return True
return False
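
# Illustrative check (added): a Han character falls in the base CJK block, while
# Hangul and Katakana codepoints do not, even though they are also East Asian.
def _cjk_example():
    assert _is_chinese_char(ord("神")) is True
    assert _is_chinese_char(ord("한")) is False
    assert _is_chinese_char(ord("カ")) is False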
def is_chinese(word):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            span_len = min(end - start, max_word_len)
            for i in range(span_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
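
# Illustrative example (added): given WordPiece tokens and an LTP word "身高",
# the second character is marked with "##" so whole-word masking treats the pair
# as a single unit.
def _add_sub_symbol_example():
    assert add_sub_symbol(["身", "高", "180"], {"身高"}) == ["身", "##高", "180"]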
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for token_id in input_ids:
            token = bert_tokenizer._convert_id_to_token(token_id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from: RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
    args = parser.parse_args()
main(args)
| 343 | 0 |
"""Wav2Vec2 model configuration."""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
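
# Illustrative note (added): inputs_to_logits_ratio is the total stride of the
# convolutional feature extractor, i.e. how many raw audio samples map to one
# encoder frame. With the default strides (5, 2, 2, 2, 2, 2, 2) that is
# 5 * 2**6 = 320, so 16 kHz audio yields 50 frames per second.
if __name__ == "__main__":
    config = Wav2Vec2Config()
    assert config.inputs_to_logits_ratio == 320
    print(16000 / config.inputs_to_logits_ratio, "frames per second")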
| 179 |
"""Testing suite for the PyTorch TrOCR model."""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.key_length = decoder_seq_length
        self.num_hidden_states_types = 2
        self.expected_num_hidden_layers = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, )
        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
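
# Illustrative, framework-free sketch (added): the cache equivalence that
# test_decoder_model_past asserts. A causal model's output at position t depends
# only on tokens[:t+1], so recomputing from scratch and stepping with a cache
# must agree; a running sum stands in for causal self-attention here.
def _cache_equivalence_demo():
    tokens = [3, 1, 4, 1, 5]
    full = [sum(tokens[: t + 1]) for t in range(len(tokens))]  # full forward pass
    cache, stepped = 0, []
    for tok in tokens:  # incremental decoding with a cached running state
        cache += tok
        stepped.append(cache)
    assert full == stepped
    return full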
| 179 | 1 |
"""Update the documentation version table in custom.js for a new release."""
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the stable version and the version mapping in the custom.js file."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
| 190 |
"""Pinned dependency versions."""
deps = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
| 190 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.downsample_layers.0.0.weight", "backbone.embeddings.patch_embeddings.weight") )
rename_keys.append(("backbone.downsample_layers.0.0.bias", "backbone.embeddings.patch_embeddings.bias") )
rename_keys.append(("backbone.downsample_layers.0.1.weight", "backbone.embeddings.layernorm.weight") )
rename_keys.append(("backbone.downsample_layers.0.1.bias", "backbone.embeddings.layernorm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.stages.{i}.{j}.gamma", F"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.weight", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.depthwise_conv.bias", F"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.weight", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.norm.bias", F"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv1.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.weight", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((F"backbone.stages.{i}.{j}.pointwise_conv2.bias", F"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((F"backbone.downsample_layers.{i}.0.weight", F"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.0.bias", F"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.weight", F"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((F"backbone.downsample_layers.{i}.1.bias", F"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
"upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
"upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
"upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
"upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
SCREAMING_SNAKE_CASE = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
SCREAMING_SNAKE_CASE = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
SCREAMING_SNAKE_CASE = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
SCREAMING_SNAKE_CASE = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
SCREAMING_SNAKE_CASE = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCAmelCase__ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path)
print(F"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(F"Pushing model and processor for {model_name} to hub" )
model.push_to_hub(F"openmmlab/{model_name}" )
processor.push_to_hub(F"openmmlab/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[f"""upernet-convnext-{size}""" for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 363 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
_lowerCamelCase : Any = {f"""funnel-transformer/{name}""": 5_12 for name in _model_names}
_lowerCamelCase : Optional[Any] = {f"""funnel-transformer/{name}""": {'''do_lower_case''': True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
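
# Illustrative note (added): unlike BERT, Funnel gives the [CLS] position its own
# token type (2). The helper below restates the arithmetic of
# create_token_type_ids_from_sequences without needing tokenizer files.
def _token_type_layout_example(len_a=1, len_b=1, cls_token_type_id=2):
    # [CLS] gets type 2, sequence A (+[SEP]) type 0, sequence B (+[SEP]) type 1.
    layout = [cls_token_type_id] + (len_a + 1) * [0] + (len_b + 1) * [1]
    assert _token_type_layout_example.__defaults__ is not None  # pure illustration
    return layout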
| 206 | 0 |
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
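
# Illustrative note (added): the stretch above is histogram equalization — each
# gray level is mapped through the cumulative distribution function (CDF) scaled
# by L - 1. A minimal numpy sketch of that mapping on a toy histogram:
def _equalization_lut_example():
    hist = np.array([4, 0, 0, 4], dtype=float)  # hypothetical 2-bit image, L = 4
    cdf = np.cumsum(hist / hist.sum())
    lut = np.rint((hist.size - 1) * cdf).astype(int)
    assert lut.tolist() == [2, 2, 2, 3]  # dark half spreads to level 2, bright to 3
    return lut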
| 101 |
"""simple docstring"""
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
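# Worked example: 255 in base 16.
#   divmod(255, 16) -> (15, 15): digit "F" (ALPHABET_VALUES["15"]), num becomes 15
#   divmod(15, 16)  -> (0, 15):  digit "F", the quotient reaches 0, so we stop
# Digits are collected least-significant first and reversed at the end:
#   decimal_to_any(255, 16) == "FF"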
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 3_7):
for num in range(1_0_0_0):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 242 | 0 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
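    # For context, a BarkProcessor call mirrors a plain tokenizer call plus an
    # optional voice preset (a hedged sketch using the checkpoint from setUp):
    #   processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
    #   inputs = processor("This is a test string", voice_preset="en_speaker_1")
    #   inputs["history_prompt"]  # dict of semantic/coarse/fine prompt arrays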
| 154 | """simple docstring"""
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]
        if len(sources) == 0 or len(sinks) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])
            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # subclasses override this with the actual algorithm
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index])
    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)
            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
__lowerCamelCase = [0]
__lowerCamelCase = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowerCamelCase = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__lowerCamelCase = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowerCamelCase = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
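    # Expected output for the 4-vertex graph above: the only augmenting path is
    # 0 -> 1 -> 2 -> 3 with bottleneck min(7, 6, 8) = 6, so this prints
    # "maximum flow is 6" (the 9-capacity edge 3 -> 0 leaves the sink and
    # carries no flow).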
| 154 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    '''simple docstring'''
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 50, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output, sigma_hat, sigma_prev, sample_hat, step_output.prev_sample, step_output["derivative"],
                )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image) | 74 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'openbmb/cpm-ant-10b': 1024,
}
def load_vocab(vocab_file):
    vocab = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip('\n')
        vocab[token] = index
    return vocab
class WordpieceTokenizer:
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = ''.join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs):
        requires_backends(self, ['jieba'])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        ' Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write(token + '\n')
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_b: List[int] = None):
        if token_ids_b is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_b

    def get_special_tokens_mask(self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_b, already_has_special_tokens=True)
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_b))
        return [1] + ([0] * len(token_ids_a)) | 297 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249, action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128, embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256, eos_token_id=50256, **kwargs):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
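# Usage sketch (illustrative, not part of the original file): thanks to the
# attribute_map above, generic config names alias the GPT-style ones.
#   config = TrajectoryTransformerConfig(n_embd=128, n_layer=4)
#   config.hidden_size        # 128 (alias of n_embd)
#   config.num_hidden_layers  # 4 (alias of n_layer)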
| 139 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 139 | 1 |
'''simple docstring'''
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError('Either arr or size must be specified')

    def init(self, arr):
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        return index - (index & (-index))

    def add(self, index, value):
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        self.add(index, value - self.get(index))

    def prefix(self, right):
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        return self.query(index, index + 1)

    def rank_query(self, value):
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
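
def _fenwick_demo() -> None:
    """A small usage sketch of the class above (illustrative; not executed by
    the doctest runner below): point updates plus O(log n) prefix sums."""
    ft = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert ft.prefix(3) == 1 + 2 + 3  # sum of arr[0:3]
    ft.add(1, 10)                     # arr[1] += 10
    assert ft.query(0, 2) == 1 + 12   # half-open range sum over arr[0:2]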
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(examples: List[str], out_file: str, model_name: str, batch_size: int = 8, device: str = DEFAULT_DEVICE, fp16=False, task="summarization", prefix=None, **generate_kwargs) -> Dict:
    '''simple docstring'''
    fout = Path(out_file).open('w', encoding='utf-8')
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, 'prefix', '') or ''
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors='pt', truncation=True, padding='longest').to(device)
        summaries = model.generate(
            input_ids=batch.input_ids, attention_mask=batch.attention_mask, **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + '\n')
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now() -> str:
    '''simple docstring'''
    return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
def run_generate(verbose=True) -> dict:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument('model_name', type=str, help='like facebook/bart-large-cnn,t5-base, etc.')
    parser.add_argument('input_path', type=str, help='like cnn_dm/test.source')
    parser.add_argument('save_path', type=str, help='where to save summaries')
    parser.add_argument('--reference_path', type=str, required=False, help='like cnn_dm/test.target')
    parser.add_argument('--score_path', type=str, required=False, default='metrics.json', help='where to save metrics')
    parser.add_argument('--device', type=str, required=False, default=DEFAULT_DEVICE, help='cuda, cuda:1, cpu etc.')
    parser.add_argument(
        '--prefix', type=str, required=False, default=None, help='will be added to the begininng of src examples')
    parser.add_argument('--task', type=str, default='summarization', help='used for task_specific_params + metrics')
    parser.add_argument('--bs', type=int, default=8, required=False, help='batch size')
    parser.add_argument(
        '--n_obs', type=int, default=-1, required=False, help='How many observations. Defaults to all.')
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--dump-args', action='store_true', help='print the custom hparams with the results')
    parser.add_argument(
        '--info', nargs='?', type=str, const=datetime_now(), help=(
            'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'
            ' lang=en-ru. If no value is passed, the current datetime string will be used.'
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('Can\'t mix --fp16 and --device cpu')
    runtime_metrics = generate_summaries_or_translations(
        examples, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fp16=args.fp16, task=args.task, prefix=args.prefix, **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if 'translation' in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, 'w'))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 328 | 0 |
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=33, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            'feature-extraction': EsmModel,
            'fill-mask': EsmForMaskedLM,
            'text-classification': EsmForSequenceClassification,
            'token-classification': EsmForTokenClassification,
            'zero-shot': EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ])
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)
        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip('Esm does not support embedding resizing')
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip('Esm does not support embedding resizing')
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D')
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)
            expected_slice = torch.tensor(
                [[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]])
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D')
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]])
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 361 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
    'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_maskformer'] = ['MaskFormerFeatureExtractor']
    _import_structure['image_processing_maskformer'] = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_maskformer'] = [
'''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MaskFormerForInstanceSegmentation''',
'''MaskFormerModel''',
'''MaskFormerPreTrainedModel''',
]
    _import_structure['modeling_maskformer_swin'] = [
'''MaskFormerSwinBackbone''',
'''MaskFormerSwinModel''',
'''MaskFormerSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 314 | 0 |
'''simple docstring'''
import qiskit
def half_adder(bit0: int, bit1: int):
    """simple docstring"""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f'Half Adder Output Qubit Counts: {counts}')
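    # Sanity check: the circuit is purely classical, so for inputs (1, 1) every
    # shot yields sum = 0 and carry = 1; with clbit 1 = carry and clbit 0 = sum,
    # the counts should be exactly {"10": 1000}.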
| 254 | import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, rotary_dim=4, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim,
        )
        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype='i4')
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype='i4')
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids,
        )
        outputs = model(input_ids)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}')

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1,
        )
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype='i4')
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids,
        )
        outputs = model(input_ids, attention_mask=attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}')
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask)
    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained('gpt2', pad_token='<|endoftext|>', padding_side='left')
        inputs = tokenizer(['Hello this is a long string', 'Hey'], return_tensors='np', padding=True, truncation=True)
        model = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B')
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs['input_ids'], attention_mask=inputs['attention_mask'], pad_token_id=tokenizer.pad_token_id).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
        expected_string = [
            'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
            'Hey, I\'m a little late to the party. I\'m going to',
        ]
        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                batch_size, seq_length = pt_inputs['input_ids'].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs['attention_mask'][batch_idx, :start_index] = 0
                    pt_inputs['attention_mask'][batch_idx, start_index:] = 1
                    prepared_inputs_dict['attention_mask'][batch_idx, :start_index] = 0
                    prepared_inputs_dict['attention_mask'][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), 'Output lengths differ between Flax and PyTorch')
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), 'Output lengths differ between Flax and PyTorch')
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
@is_pt_flax_cross_test
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCAmelCase : Union[str, Any] =self._prepare_for_class(snake_case__ , snake_case__ )
UpperCAmelCase : Union[str, Any] ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCAmelCase : int =model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase : int =getattr(snake_case__ , snake_case__ )
UpperCAmelCase : Dict =pt_model_class(snake_case__ ).eval()
UpperCAmelCase : str =model_class(snake_case__ , dtype=jnp.floataa )
UpperCAmelCase : Optional[Any] =load_flax_weights_in_pytorch_model(snake_case__ , fx_model.params )
UpperCAmelCase , UpperCAmelCase : Optional[int] =pt_inputs['''input_ids'''].shape
UpperCAmelCase : Optional[int] =np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(snake_case__ ):
UpperCAmelCase : str =0
UpperCAmelCase : Any =1
UpperCAmelCase : List[Any] =0
UpperCAmelCase : Tuple =1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
UpperCAmelCase : Optional[Any] =pt_model(**snake_case__ ).to_tuple()
UpperCAmelCase : List[Any] =fx_model(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(snake_case__ , snake_case__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(snake_case__ )
UpperCAmelCase : Tuple =pt_model_class.from_pretrained(snake_case__ , from_flax=snake_case__ )
with torch.no_grad():
UpperCAmelCase : Any =pt_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(
len(snake_case__ ) , len(snake_case__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(snake_case__ , snake_case__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
UpperCAmelCase : str =model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
UpperCAmelCase : Tuple =model(np.ones((1, 1) ) )
self.assertIsNotNone(snake_case__ )
| 348 | 0 |
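The Flax GPT-J test above verifies key/value-cache decoding by comparing the last-position logits of an incremental forward pass against a full forward pass under a 1e-3 tolerance. A framework-agnostic sketch of that comparison pattern, with random NumPy arrays standing in for the two sets of model logits (the shapes are illustrative assumptions, not the model's):

```python
import numpy as np

def assert_cache_consistent(full_logits, cached_logits, atol=1e-3):
    # Mirror the test: compare only the last position of each sequence.
    diff = np.max(np.abs(full_logits[:, -1, :] - cached_logits[:, -1, :]))
    assert diff < atol, f"Max diff is {diff}"

rng = np.random.default_rng(0)
logits = rng.normal(size=(2, 7, 50))            # (batch, seq, vocab) stand-in
assert_cache_consistent(logits, logits + 1e-5)  # passes: difference well below atol
```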
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """naver-clova-ix/donut-base-finetuned-docvqa"""
__SCREAMING_SNAKE_CASE = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
__SCREAMING_SNAKE_CASE = """document_qa"""
__SCREAMING_SNAKE_CASE = AutoProcessor
__SCREAMING_SNAKE_CASE = VisionEncoderDecoderModel
__SCREAMING_SNAKE_CASE = ["""image""", """text"""]
__SCREAMING_SNAKE_CASE = ["""text"""]
def __init__(self , *__a , **__a ) -> Tuple:
"""simple docstring"""
if not is_vision_available():
raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
super().__init__(*__a , **__a )
def UpperCamelCase__ (self , __a , __a ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
UpperCAmelCase__ = task_prompt.replace('{user_input}' , __a )
UpperCAmelCase__ = self.pre_processor.tokenizer(
__a , add_special_tokens=__a , return_tensors='pt' ).input_ids
UpperCAmelCase__ = self.pre_processor(__a , return_tensors='pt' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def UpperCamelCase__ (self , __a ) -> List[Any]:
"""simple docstring"""
return self.model.generate(
inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__a , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__a , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__a , ).sequences
def UpperCamelCase__ (self , __a ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = self.pre_processor.batch_decode(__a )[0]
UpperCAmelCase__ = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
UpperCAmelCase__ = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
UpperCAmelCase__ = re.sub(r'<.*?>' , '' , __a , count=1 ).strip() # remove first task start token
UpperCAmelCase__ = self.pre_processor.tokenajson(__a )
return sequence["answer"]
| 335 |
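The Donut tool above follows a three-step flow: build a task prompt of the form `<s_docvqa><s_question>...</s_question><s_answer>`, generate with it as decoder input, and strip special tokens plus the first task token from the output. A self-contained sketch of the decode step, using plain strings in place of the processor (the token names follow the snippet; the sample sequence is made up):

```python
import re

def clean_donut_sequence(sequence, eos_token="</s>", pad_token="<pad>"):
    # Remove end-of-sequence and padding markers, then the leading task-start token.
    sequence = sequence.replace(eos_token, "").replace(pad_token, "")
    return re.sub(r"<.*?>", "", sequence, count=1).strip()

print(clean_donut_sequence("<s_docvqa> 42</s><pad>"))  # -> "42"
```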
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_UpperCamelCase = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def UpperCamelCase_( snake_case__: int ) -> str:
for pegasus_name, hf_name in PATTERNS:
UpperCAmelCase__ = k.replace(snake_case__ , snake_case__ )
return k
def UpperCamelCase_( snake_case__: dict , snake_case__: dict ) -> PegasusForConditionalGeneration:
UpperCAmelCase__ = DEFAULTS.copy()
cfg_kwargs.update(snake_case__ )
UpperCAmelCase__ = PegasusConfig(**snake_case__ )
UpperCAmelCase__ = PegasusForConditionalGeneration(snake_case__ )
UpperCAmelCase__ = torch_model.model.state_dict()
UpperCAmelCase__ = {}
for k, v in tf_weights.items():
UpperCAmelCase__ = rename_state_dict_key(snake_case__ )
if new_k not in sd:
raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" )
if "dense" in k or "proj" in new_k:
UpperCAmelCase__ = v.T
UpperCAmelCase__ = torch.tensor(snake_case__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
# make sure embedding.padding_idx is respected
UpperCAmelCase__ = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
UpperCAmelCase__ = mapping['shared.weight']
UpperCAmelCase__ = mapping['shared.weight']
UpperCAmelCase__ = {k: torch.zeros_like(snake_case__ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**snake_case__ )
UpperCAmelCase__ , UpperCAmelCase__ = torch_model.model.load_state_dict(snake_case__ , strict=snake_case__ )
UpperCAmelCase__ = [
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
assert extra == [], f"no matches found for the following tf keys {extra}"
return torch_model
def UpperCamelCase_( snake_case__: int="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
UpperCAmelCase__ = tf.train.list_variables(snake_case__ )
UpperCAmelCase__ = {}
UpperCAmelCase__ = ['Adafactor', 'global_step']
for name, shape in tqdm(snake_case__ , desc='converting tf checkpoint to dict' ):
UpperCAmelCase__ = any(pat in name for pat in ignore_name )
if skip_key:
continue
UpperCAmelCase__ = tf.train.load_variable(snake_case__ , snake_case__ )
UpperCAmelCase__ = array
return tf_weights
def UpperCamelCase_( snake_case__: str , snake_case__: str ) -> Optional[Any]:
# save tokenizer first
UpperCAmelCase__ = Path(snake_case__ ).parent.name
UpperCAmelCase__ = task_specific_params[f"summarization_{dataset}"]['max_position_embeddings']
UpperCAmelCase__ = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=snake_case__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(snake_case__ )
# convert model
UpperCAmelCase__ = get_tf_weights_as_numpy(snake_case__ )
UpperCAmelCase__ = task_specific_params[f"summarization_{dataset}"]
if dataset == "large":
UpperCAmelCase__ = task_specific_params
UpperCAmelCase__ = convert_pegasus(snake_case__ , snake_case__ )
torch_model.save_pretrained(snake_case__ )
UpperCAmelCase__ = torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(snake_case__ , Path(snake_case__ ) / 'pytorch_model.bin' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
_UpperCamelCase = parser.parse_args()
if args.save_dir is None:
_UpperCamelCase = Path(args.tf_ckpt_path).parent.name
_UpperCamelCase = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 335 | 1 |
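The Pegasus converter above maps TensorFlow variable names to HuggingFace state-dict keys by applying an ordered list of string rewrites, and transposes kernels for dense/projection layers afterwards. A minimal sketch of the renaming step with a shortened pattern list taken from the snippet:

```python
PATTERNS = [
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    ["kernel", "weight"],
]

def rename_state_dict_key(k):
    # Apply every rewrite in order; earlier, more specific patterns win.
    for tf_name, hf_name in PATTERNS:
        k = k.replace(tf_name, hf_name)
    return k

print(rename_state_dict_key("decoder/memory_attention/output_proj/kernel"))
# -> decoder.encoder_attn.output_proj.weight
```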
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : torch.FloatTensor
class snake_case_ ( __A , __A ):
'''simple docstring'''
@register_to_config
def __init__( self : Any , _UpperCamelCase : int = 3 , _UpperCamelCase : int = 3 , _UpperCamelCase : Tuple[str] = ("DownEncoderBlock2D",) , _UpperCamelCase : Tuple[str] = ("UpDecoderBlock2D",) , _UpperCamelCase : Tuple[int] = (6_4,) , _UpperCamelCase : int = 1 , _UpperCamelCase : str = "silu" , _UpperCamelCase : int = 3 , _UpperCamelCase : int = 3_2 , _UpperCamelCase : int = 2_5_6 , _UpperCamelCase : int = 3_2 , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : float = 0.18215 , _UpperCamelCase : str = "group" , ) ->int:
super().__init__()
# pass init params to Encoder
snake_case_ = Encoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , down_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , double_z=_UpperCamelCase , )
snake_case_ = vq_embed_dim if vq_embed_dim is not None else latent_channels
snake_case_ = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 )
snake_case_ = VectorQuantizer(_UpperCamelCase , _UpperCamelCase , beta=0.25 , remap=_UpperCamelCase , sane_index_shape=_UpperCamelCase )
snake_case_ = nn.Convad(_UpperCamelCase , _UpperCamelCase , 1 )
# pass init params to Decoder
snake_case_ = Decoder(
in_channels=_UpperCamelCase , out_channels=_UpperCamelCase , up_block_types=_UpperCamelCase , block_out_channels=_UpperCamelCase , layers_per_block=_UpperCamelCase , act_fn=_UpperCamelCase , norm_num_groups=_UpperCamelCase , norm_type=_UpperCamelCase , )
@apply_forward_hook
def snake_case__( self : Any , _UpperCamelCase : torch.FloatTensor , _UpperCamelCase : bool = True ) ->VQEncoderOutput:
snake_case_ = self.encoder(_UpperCamelCase )
snake_case_ = self.quant_conv(_UpperCamelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=_UpperCamelCase )
@apply_forward_hook
def snake_case__( self : str , _UpperCamelCase : torch.FloatTensor , _UpperCamelCase : bool = False , _UpperCamelCase : bool = True ) ->Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
snake_case_, snake_case_, snake_case_ = self.quantize(_UpperCamelCase )
else:
snake_case_ = h
snake_case_ = self.post_quant_conv(_UpperCamelCase )
snake_case_ = self.decoder(_UpperCamelCase , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
def snake_case__( self : Any , _UpperCamelCase : torch.FloatTensor , _UpperCamelCase : bool = True ) ->Union[DecoderOutput, torch.FloatTensor]:
snake_case_ = sample
snake_case_ = self.encode(_UpperCamelCase ).latents
snake_case_ = self.decode(_UpperCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_UpperCamelCase )
| 8 |
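Inside `decode`, the model above snaps each latent vector onto its nearest codebook entry before running the decoder (the `VectorQuantizer` step). A toy NumPy sketch of that nearest-neighbour lookup; the codebook size and latent dimension are arbitrary illustration values, not the model's configuration:

```python
import numpy as np

def vector_quantize(latents, codebook):
    # latents: (n, d), codebook: (k, d); pick the codebook row with minimal squared distance.
    d2 = ((latents[:, None, :] - codebook[None, :, :]) ** 2).sum(-1)
    indices = d2.argmin(axis=1)
    return codebook[indices]

rng = np.random.default_rng(0)
codebook = rng.normal(size=(256, 4))
z = rng.normal(size=(8, 4))
z_q = vector_quantize(z, codebook)
print(z_q.shape)  # (8, 4)
```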
import math
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(SCREAMING_SNAKE_CASE__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ = 10001 ):
try:
snake_case_ = int(SCREAMING_SNAKE_CASE__ )
except (TypeError, ValueError):
raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
if nth <= 0:
raise ValueError('''Parameter nth must be greater than or equal to one.''' )
snake_case_ = []
snake_case_ = 2
while len(SCREAMING_SNAKE_CASE__ ) < nth:
if is_prime(SCREAMING_SNAKE_CASE__ ):
primes.append(SCREAMING_SNAKE_CASE__ )
num += 1
else:
num += 1
return primes[len(SCREAMING_SNAKE_CASE__ ) - 1]
if __name__ == "__main__":
print(f"""{solution() = }""") | 8 | 1 |
"""simple docstring"""
__A = {
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cookiecutter==1.7.3''',
'''dataclasses''': '''dataclasses''',
'''datasets''': '''datasets!=2.5.0''',
'''decord''': '''decord==0.6.0''',
'''deepspeed''': '''deepspeed>=0.9.3''',
'''diffusers''': '''diffusers''',
'''dill''': '''dill<0.3.5''',
'''evaluate''': '''evaluate>=0.2.0''',
'''fairscale''': '''fairscale>0.3''',
'''faiss-cpu''': '''faiss-cpu''',
'''fastapi''': '''fastapi''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1,<=0.7.0''',
'''ftfy''': '''ftfy''',
'''fugashi''': '''fugashi>=1.0''',
'''GitPython''': '''GitPython<3.1.19''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''ipadic''': '''ipadic>=1.0.0,<2.0''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
'''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
'''jieba''': '''jieba''',
'''kenlm''': '''kenlm''',
'''keras-nlp''': '''keras-nlp>=0.3.1''',
'''librosa''': '''librosa''',
'''nltk''': '''nltk''',
'''natten''': '''natten>=0.14.6''',
'''numpy''': '''numpy>=1.17''',
'''onnxconverter-common''': '''onnxconverter-common''',
'''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
'''onnxruntime''': '''onnxruntime>=1.4.0''',
'''opencv-python''': '''opencv-python''',
'''optuna''': '''optuna''',
'''optax''': '''optax>=0.0.8,<=0.1.4''',
'''packaging''': '''packaging>=20.0''',
'''parameterized''': '''parameterized''',
'''phonemizer''': '''phonemizer''',
'''protobuf''': '''protobuf''',
'''psutil''': '''psutil''',
'''pyyaml''': '''pyyaml>=5.1''',
'''pydantic''': '''pydantic<2''',
'''pytest''': '''pytest>=7.2.0''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''python''': '''python>=3.8.0''',
'''ray[tune]''': '''ray[tune]''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
'''rjieba''': '''rjieba''',
'''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
'''ruff''': '''ruff>=0.0.241,<=0.0.259''',
'''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
'''sacremoses''': '''sacremoses''',
'''safetensors''': '''safetensors>=0.3.1''',
'''sagemaker''': '''sagemaker>=2.31.0''',
'''scikit-learn''': '''scikit-learn''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''sigopt''': '''sigopt''',
'''starlette''': '''starlette''',
'''sudachipy''': '''sudachipy>=0.6.6''',
'''sudachidict_core''': '''sudachidict_core>=20220729''',
'''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
'''tensorflow''': '''tensorflow>=2.6,<2.14''',
'''tensorflow-text''': '''tensorflow-text<2.14''',
'''tf2onnx''': '''tf2onnx''',
'''timeout-decorator''': '''timeout-decorator''',
'''timm''': '''timm''',
'''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
'''torch''': '''torch>=1.9,!=1.12.0''',
'''torchaudio''': '''torchaudio''',
'''torchvision''': '''torchvision''',
'''pyctcdecode''': '''pyctcdecode>=0.4.0''',
'''tqdm''': '''tqdm>=4.27''',
'''unidic''': '''unidic>=1.0.2''',
'''unidic_lite''': '''unidic_lite>=1.0.7''',
'''urllib3''': '''urllib3<2.0.0''',
'''uvicorn''': '''uvicorn''',
}
| 64 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class _snake_case ( a__ ):
snake_case__ = "bert-generation"
def __init__( self : Optional[int] , UpperCAmelCase : Dict=50358 , UpperCAmelCase : int=1024 , UpperCAmelCase : Optional[int]=24 , UpperCAmelCase : str=16 , UpperCAmelCase : str=4096 , UpperCAmelCase : List[Any]="gelu" , UpperCAmelCase : str=0.1 , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : Union[str, Any]=512 , UpperCAmelCase : Optional[Any]=0.0_2 , UpperCAmelCase : int=1E-12 , UpperCAmelCase : Tuple=0 , UpperCAmelCase : int=2 , UpperCAmelCase : Optional[int]=1 , UpperCAmelCase : Union[str, Any]="absolute" , UpperCAmelCase : Tuple=True , **UpperCAmelCase : Optional[Any] , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = vocab_size
__lowerCamelCase : List[Any] = hidden_size
__lowerCamelCase : Any = num_hidden_layers
__lowerCamelCase : List[Any] = num_attention_heads
__lowerCamelCase : int = hidden_act
__lowerCamelCase : List[str] = intermediate_size
__lowerCamelCase : Tuple = hidden_dropout_prob
__lowerCamelCase : List[str] = attention_probs_dropout_prob
__lowerCamelCase : Optional[Any] = max_position_embeddings
__lowerCamelCase : List[Any] = initializer_range
__lowerCamelCase : Union[str, Any] = layer_norm_eps
__lowerCamelCase : List[str] = position_embedding_type
__lowerCamelCase : Optional[Any] = use_cache
| 64 | 1 |
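The configuration class above is essentially an attribute container: every hyperparameter becomes an instance attribute, and the special token ids are forwarded to the base class. A stripped-down, standard-library-only sketch of the same pattern (the class name and defaults are illustrative, not the real `PretrainedConfig` API):

```python
class TinyConfig:
    model_type = "tiny-generation"

    def __init__(self, vocab_size=50358, hidden_size=1024, use_cache=True, **kwargs):
        # Unknown kwargs would normally be forwarded to the PretrainedConfig base class.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.use_cache = use_cache
        for key, value in kwargs.items():
            setattr(self, key, value)

cfg = TinyConfig(hidden_size=512, pad_token_id=0)
print(cfg.hidden_size, cfg.pad_token_id)  # 512 0
```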
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class _A( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self ):
__A : str = inspect.getfile(accelerate.test_utils )
__A : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
__A : List[str] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
__A : List[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def UpperCAmelCase_ ( self ):
print(F"""Found {torch.cuda.device_count()} devices.""" )
__A : Tuple = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A , env=os.environ.copy() )
@require_multi_gpu
def UpperCAmelCase_ ( self ):
print(F"""Found {torch.cuda.device_count()} devices.""" )
__A : Optional[Any] = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
print(F"""Command: {cmd}""" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A , env=os.environ.copy() )
@require_multi_gpu
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A , env=os.environ.copy() )
@require_multi_gpu
def UpperCAmelCase_ ( self ):
print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
__A : List[Any] = ['torchrun', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ):
execute_subprocess_async(_A , env=os.environ.copy() )
if __name__ == "__main__":
UpperCAmelCase : Optional[Any] = Accelerator()
UpperCAmelCase : Optional[int] = (accelerator.state.process_index + 2, 10)
UpperCAmelCase : List[str] = torch.randint(0, 10, shape).to(accelerator.device)
UpperCAmelCase : List[Any] = ''''''
UpperCAmelCase : Optional[Any] = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
UpperCAmelCase : Tuple = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
UpperCAmelCase : str = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 280 |
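The `__main__` block above exercises `accelerator.pad_across_processes`, which zero-pads each rank's tensor along dim 0 so all ranks agree on the shape; `pad_first` decides whether the zeros go before or after the data. A single-process NumPy sketch of the padding rule itself (the function name and shapes are assumptions for illustration):

```python
import numpy as np

def pad_to(tensor, target, pad_first=False):
    pad = target - tensor.shape[0]
    padding = np.zeros((pad,) + tensor.shape[1:], dtype=tensor.dtype)
    parts = (padding, tensor) if pad_first else (tensor, padding)
    return np.concatenate(parts, axis=0)

t = np.arange(1, 7).reshape(3, 2)
print(pad_to(t, 5))                   # two zero rows appended after the data
print(pad_to(t, 5, pad_first=True))   # two zero rows prepended before the data
```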
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class _A:
"""simple docstring"""
@staticmethod
def UpperCAmelCase_ ( *_A , **_A ):
pass
def _SCREAMING_SNAKE_CASE ( a ) -> str:
__A : str = hashlib.mda(image.tobytes() )
return m.hexdigest()[:10]
def _SCREAMING_SNAKE_CASE ( a ) -> Dict:
__A : Dict = np.array(a )
__A : List[Any] = npimg.shape
return {"hash": hashimage(a ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class _A( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase : str = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
UpperCamelCase : int = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def UpperCAmelCase_ ( self , _A , _A , _A ):
__A : Dict = MaskGenerationPipeline(model=_A , image_processor=_A )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCAmelCase_ ( self , _A , _A ):
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def UpperCAmelCase_ ( self ):
pass
@slow
@require_torch
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
__A : List[str] = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256 )
# Shortening by hashing
__A : List[Any] = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(_A ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = 'facebook/sam-vit-huge'
__A : List[str] = pipeline('mask-generation' , model=_A )
__A : Tuple = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
__A : List[str] = []
for i, o in enumerate(outputs['masks'] ):
new_outupt += [{"mask": mask_to_test_readable(_A ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
] , )
| 280 | 1 |
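The SAM pipeline tests above keep fixtures small by reducing each predicted mask to a short hash prefix plus its shape instead of storing pixels. A self-contained sketch of that trick, with a NumPy array standing in for the PIL image used in the snippet (MD5 is assumed as the hash):

```python
import hashlib
import numpy as np

def mask_to_test_readable(mask):
    # Hash the raw bytes so equal masks compare equal without storing pixel data.
    digest = hashlib.md5(mask.tobytes()).hexdigest()[:10]
    return {"hash": digest, "shape": mask.shape}

mask = np.zeros((480, 640), dtype=np.uint8)
print(mask_to_test_readable(mask))
```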
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ , lowerCamelCase_ = 9, 14 # noqa: F841
lowerCamelCase_ = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
lowerCamelCase_ = defaultdict(lowercase )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
lowerCamelCase_ = mst(lowercase )
lowerCamelCase_ = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
lowerCamelCase_ = tuple(answer[:2] )
lowerCamelCase_ = tuple(edge[::-1] )
assert edge in result or reverse in result
| 366 |
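The test above feeds an undirected edge list into an adjacency map and checks the MST edges order-insensitively, accepting either orientation of each edge. For reference, a compact heap-based Prim's algorithm over the same adjacency-list shape; this is an illustrative stand-in, not the imported `prisms_algorithm` implementation:

```python
import heapq
from collections import defaultdict

def prim_mst(adjacency, start=0):
    visited = {start}
    heap = [(cost, start, nxt) for nxt, cost in adjacency[start]]
    heapq.heapify(heap)
    mst = []
    while heap:
        cost, frm, to = heapq.heappop(heap)
        if to in visited:
            continue
        visited.add(to)
        mst.append([frm, to, cost])
        for nxt, nxt_cost in adjacency[to]:
            if nxt not in visited:
                heapq.heappush(heap, (nxt_cost, to, nxt))
    return mst

adj = defaultdict(list)
for a, b, w in [[0, 1, 4], [0, 7, 8], [1, 7, 11], [7, 6, 1], [6, 5, 2]]:
    adj[a].append([b, w])
    adj[b].append([a, w])
print(prim_mst(adj))  # [[0, 1, 4], [0, 7, 8], [7, 6, 1], [6, 5, 2]]
```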
def _SCREAMING_SNAKE_CASE ( lowercase : Tuple , lowercase : Dict , lowercase : List[str] , lowercase : Dict , lowercase : Dict , lowercase : List[str] ):
'''simple docstring'''
if index == r:
for j in range(lowercase ):
print(data[j] , end=' ' )
print(' ' )
return
# When no more elements remain to put in data[]
if i >= n:
return
# current is included, put next at next location
lowerCamelCase_ = arr[i]
combination_util(lowercase , lowercase , lowercase , index + 1 , lowercase , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(lowercase , lowercase , lowercase , lowercase , lowercase , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : Any , lowercase : Tuple ):
'''simple docstring'''
lowerCamelCase_ = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(lowercase , lowercase , lowercase , 0 , lowercase , 0 )
if __name__ == "__main__":
# Driver code to check the function above
lowerCamelCase : int = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 208 | 0 |
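The recursive routine above enumerates all size-r combinations via the classic include/exclude decision at each index. The standard library produces the same set directly, which makes a convenient cross-check:

```python
from itertools import combinations

arr = [10, 20, 30, 40, 50]
for combo in combinations(arr, 3):
    print(*combo)
```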
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
__A =logging.getLogger(__name__)
@dataclass
class _snake_case :
lowerCAmelCase :str
lowerCAmelCase :List[str]
lowerCAmelCase :Optional[List[str]]
@dataclass
class _snake_case :
lowerCAmelCase :List[int]
lowerCAmelCase :List[int]
lowerCAmelCase :Optional[List[int]] = None
lowerCAmelCase :Optional[List[int]] = None
class _snake_case ( a__ ):
lowerCAmelCase :List[str] = '''train'''
lowerCAmelCase :Tuple = '''dev'''
lowerCAmelCase :Union[str, Any] = '''test'''
class _snake_case :
@staticmethod
def snake_case__ ( _lowerCamelCase , _lowerCamelCase):
raise NotImplementedError
@staticmethod
def snake_case__ ( _lowerCamelCase):
raise NotImplementedError
@staticmethod
def snake_case__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase="[CLS]" , _lowerCamelCase=1 , _lowerCamelCase="[SEP]" , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=0 , _lowerCamelCase=0 , _lowerCamelCase=-100 , _lowerCamelCase=0 , _lowerCamelCase=True , ):
UpperCAmelCase__ : Union[str, Any] = {label: i for i, label in enumerate(_lowerCamelCase)}
UpperCAmelCase__ : str = []
for ex_index, example in enumerate(_lowerCamelCase):
if ex_index % 1_0000 == 0:
logger.info("""Writing example %d of %d""" , _lowerCamelCase , len(_lowerCamelCase))
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : Tuple = []
for word, label in zip(example.words , example.labels):
UpperCAmelCase__ : Optional[int] = tokenizer.tokenize(_lowerCamelCase)
# bert-base-multilingual-cased sometimes outputs nothing ([]) when calling tokenize with just a space.
if len(_lowerCamelCase) > 0:
tokens.extend(_lowerCamelCase)
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_lowerCamelCase) - 1))
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
UpperCAmelCase__ : Optional[int] = tokenizer.num_special_tokens_to_add()
if len(_lowerCamelCase) > max_seq_length - special_tokens_count:
UpperCAmelCase__ : List[str] = tokens[: (max_seq_length - special_tokens_count)]
UpperCAmelCase__ : str = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
UpperCAmelCase__ : Any = [sequence_a_segment_id] * len(_lowerCamelCase)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
UpperCAmelCase__ : Dict = [cls_token] + tokens
UpperCAmelCase__ : Any = [pad_token_label_id] + label_ids
UpperCAmelCase__ : Optional[Any] = [cls_token_segment_id] + segment_ids
UpperCAmelCase__ : str = tokenizer.convert_tokens_to_ids(_lowerCamelCase)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
UpperCAmelCase__ : Optional[int] = [1 if mask_padding_with_zero else 0] * len(_lowerCamelCase)
# Zero-pad up to the sequence length.
UpperCAmelCase__ : int = max_seq_length - len(_lowerCamelCase)
if pad_on_left:
UpperCAmelCase__ : List[Any] = ([pad_token] * padding_length) + input_ids
UpperCAmelCase__ : str = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
UpperCAmelCase__ : str = ([pad_token_segment_id] * padding_length) + segment_ids
UpperCAmelCase__ : int = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_lowerCamelCase) == max_seq_length
assert len(_lowerCamelCase) == max_seq_length
assert len(_lowerCamelCase) == max_seq_length
assert len(_lowerCamelCase) == max_seq_length
if ex_index < 5:
logger.info("""*** Example ***""")
logger.info("""guid: %s""" , example.guid)
logger.info("""tokens: %s""" , """ """.join([str(_lowerCamelCase) for x in tokens]))
logger.info("""input_ids: %s""" , """ """.join([str(_lowerCamelCase) for x in input_ids]))
logger.info("""input_mask: %s""" , """ """.join([str(_lowerCamelCase) for x in input_mask]))
logger.info("""segment_ids: %s""" , """ """.join([str(_lowerCamelCase) for x in segment_ids]))
logger.info("""label_ids: %s""" , """ """.join([str(_lowerCamelCase) for x in label_ids]))
if "token_type_ids" not in tokenizer.model_input_names:
UpperCAmelCase__ : int = None
features.append(
InputFeatures(
input_ids=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , label_ids=_lowerCamelCase))
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class _snake_case ( a__ ):
lowerCAmelCase :List[InputFeatures]
lowerCAmelCase :int = nn.CrossEntropyLoss().ignore_index
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase=False , _lowerCamelCase = Split.train , ):
# Load data features from cache or dataset file
UpperCAmelCase__ : Union[str, Any] = os.path.join(
_lowerCamelCase , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(_lowerCamelCase)) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase__ : str = cached_features_file + """.lock"""
with FileLock(_lowerCamelCase):
if os.path.exists(_lowerCamelCase) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''')
UpperCAmelCase__ : Optional[Any] = torch.load(_lowerCamelCase)
else:
logger.info(f'''Creating features from dataset file at {data_dir}''')
UpperCAmelCase__ : List[str] = token_classification_task.read_examples_from_file(_lowerCamelCase , _lowerCamelCase)
# TODO clean up all this to leverage built-in features of tokenizers
UpperCAmelCase__ : Optional[int] = token_classification_task.convert_examples_to_features(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , cls_token_at_end=bool(model_type in ["""xlnet"""]) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowerCamelCase , pad_on_left=bool(tokenizer.padding_side == """left""") , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f'''Saving features into cached file {cached_features_file}''')
torch.save(self.features , _lowerCamelCase)
def __len__( self):
return len(self.features)
def __getitem__( self , _lowerCamelCase):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class _snake_case :
lowerCAmelCase :List[InputFeatures]
lowerCAmelCase :int = -100
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase=False , _lowerCamelCase = Split.train , ):
UpperCAmelCase__ : List[str] = token_classification_task.read_examples_from_file(_lowerCamelCase , _lowerCamelCase)
# TODO clean up all this to leverage built-in features of tokenizers
UpperCAmelCase__ : Optional[Any] = token_classification_task.convert_examples_to_features(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , cls_token_at_end=bool(model_type in ["""xlnet"""]) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowerCamelCase , pad_on_left=bool(tokenizer.padding_side == """left""") , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
UpperCAmelCase__ : Any = tf.data.Dataset.from_generator(
_lowerCamelCase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
{"""input_ids""": tf.TensorShape([None]), """attention_mask""": tf.TensorShape([None])},
tf.TensorShape([None]),
) , )
else:
UpperCAmelCase__ : Optional[int] = tf.data.Dataset.from_generator(
_lowerCamelCase , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
{
"""input_ids""": tf.TensorShape([None]),
"""attention_mask""": tf.TensorShape([None]),
"""token_type_ids""": tf.TensorShape([None]),
},
tf.TensorShape([None]),
) , )
def snake_case__ ( self):
UpperCAmelCase__ : int = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
return self.dataset
def __len__( self):
return len(self.features)
def __getitem__( self , _lowerCamelCase):
return self.features[i]
| 163 |
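The converter above aligns word-level labels with sub-word tokens by giving the real label id to a word's first piece and `pad_token_label_id` (-100, the `nn.CrossEntropyLoss` ignore index) to all remaining pieces. A miniature sketch of that alignment; `fake_tokenize` is a made-up stand-in for a real WordPiece tokenizer:

```python
PAD_TOKEN_LABEL_ID = -100

def fake_tokenize(word):
    # Hypothetical stand-in: split every word into 2-character pieces.
    return [word[i:i + 2] for i in range(0, len(word), 2)]

def align_labels(words, labels, label_map):
    tokens, label_ids = [], []
    for word, label in zip(words, labels):
        pieces = fake_tokenize(word)
        tokens.extend(pieces)
        # Real label on the first piece, ignore-index on the rest.
        label_ids.extend([label_map[label]] + [PAD_TOKEN_LABEL_ID] * (len(pieces) - 1))
    return tokens, label_ids

print(align_labels(["Paris", "is"], ["B-LOC", "O"], {"B-LOC": 0, "O": 1}))
# (['Pa', 'ri', 's', 'is'], [0, -100, -100, 1])
```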
'''simple docstring'''
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def _UpperCamelCase ( UpperCamelCase__ ):
if hor == 1_2_8:
UpperCAmelCase__ : int = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
UpperCAmelCase__ : Tuple = (3_2, 1_2_8, 2_5_6)
UpperCAmelCase__ : Union[str, Any] = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 3_2:
UpperCAmelCase__ : Dict = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
UpperCAmelCase__ : Union[str, Any] = (3_2, 6_4, 1_2_8, 2_5_6)
UpperCAmelCase__ : str = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
UpperCAmelCase__ : Any = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
UpperCAmelCase__ : Tuple = model.state_dict()
UpperCAmelCase__ : Union[str, Any] = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 1_4,
"""out_channels""": 1_4,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 6_5_5_3_6,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
UpperCAmelCase__ : List[Any] = UNetaDModel(**UpperCamelCase__ )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
UpperCAmelCase__ : Optional[Any] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
UpperCAmelCase__ : Union[str, Any] = state_dict.pop(UpperCamelCase__ )
hf_value_function.load_state_dict(UpperCamelCase__ )
torch.save(hf_value_function.state_dict() , f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , """w""" ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def _UpperCamelCase ( ):
UpperCAmelCase__ : Any = {
"""in_channels""": 1_4,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (3_2, 6_4, 1_2_8, 2_5_6),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 6_5_5_3_6,
"""out_channels""": 1_4,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
UpperCAmelCase__ : Tuple = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
UpperCAmelCase__ : Optional[Any] = model
UpperCAmelCase__ : Dict = UNetaDModel(**UpperCamelCase__ )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
UpperCAmelCase__ : Dict = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
UpperCAmelCase__ : str = state_dict.pop(UpperCamelCase__ )
hf_value_function.load_state_dict(UpperCamelCase__ )
torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 163 | 1 |
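Both conversion functions above match checkpoint keys positionally: because the loaded checkpoint and the freshly constructed model enumerate their parameters in the same order, `dict(zip(old_keys, new_keys))` suffices as the mapping. A tiny sketch of that positional renaming with made-up key names (plain dicts stand in for the state dicts; insertion order is what makes this work):

```python
old_sd = {"down.0.w": 1, "down.0.b": 2, "mid.w": 3}
new_keys = ["down_blocks.0.weight", "down_blocks.0.bias", "mid_block.weight"]

# Positional key mapping: assumes both listings enumerate parameters in the same order.
mapping = dict(zip(old_sd.keys(), new_keys))
new_sd = {mapping[k]: v for k, v in old_sd.items()}
print(new_sd)
```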
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] ) -> Tuple:
# save results
if os.path.exists(snake_case_ ):
if os.path.exists(os.path.join(snake_case_ , """config.json""" ) ) and os.path.isfile(
os.path.join(snake_case_ , """config.json""" ) ):
os.remove(os.path.join(snake_case_ , """config.json""" ) )
if os.path.exists(os.path.join(snake_case_ , """pytorch_model.bin""" ) ) and os.path.isfile(
os.path.join(snake_case_ , """pytorch_model.bin""" ) ):
os.remove(os.path.join(snake_case_ , """pytorch_model.bin""" ) )
else:
os.makedirs(snake_case_ )
model.save_pretrained(snake_case_ )
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : Optional[int]=False ) -> str:
snake_case = 2
if unlogit:
snake_case = torch.pow(snake_case_ , snake_case_ )
snake_case = p * torch.log(snake_case_ )
snake_case = 0
return -plogp.sum(dim=-1 )
def __lowerCamelCase ( __lowerCAmelCase : List[str] ) -> Optional[int]:
logger.info("""lv, h >\t""" + """\t""".join(F'''{x + 1}''' for x in range(len(snake_case_ ) ) ) )
for row in range(len(snake_case_ ) ):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + """\t""".join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(F'''layer {row + 1}:\t''' + """\t""".join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def __lowerCamelCase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Union[str, Any]=False ) -> int:
snake_case , snake_case = model.config.num_hidden_layers, model.config.num_attention_heads
snake_case = torch.zeros(snake_case_ , snake_case_ ).to(args.device )
snake_case = torch.zeros(snake_case_ , snake_case_ ).to(args.device )
if head_mask is None:
snake_case = torch.ones(snake_case_ , snake_case_ ).to(args.device )
head_mask.requires_grad_(requires_grad=snake_case_ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
snake_case = None
snake_case = 0.0
snake_case = 0.0
for step, inputs in enumerate(tqdm(snake_case_ , desc="""Iteration""" , disable=args.local_rank not in [-1, 0] ) ):
snake_case = tuple(t.to(args.device ) for t in inputs )
((snake_case ) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
snake_case = model(snake_case_ , labels=snake_case_ , head_mask=snake_case_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
snake_case , snake_case , snake_case = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(snake_case_ ):
snake_case = entropy(attn.detach() , snake_case_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(snake_case_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
snake_case = 2
snake_case = torch.pow(torch.pow(snake_case_ , snake_case_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
snake_case = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("""Attention entropies""" )
print_ad_tensor(snake_case_ )
if compute_importance:
logger.info("""Head importance scores""" )
print_ad_tensor(snake_case_ )
logger.info("""Head ranked by importance scores""" )
snake_case = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
snake_case = torch.arange(
head_importance.numel() , device=args.device )
snake_case = head_ranks.view_as(snake_case_ )
print_ad_tensor(snake_case_ )
return attn_entropy, head_importance, total_loss
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Tuple ) -> Any:
snake_case , snake_case , snake_case = compute_heads_importance(snake_case_ , snake_case_ , snake_case_ , compute_entropy=snake_case_ )
snake_case = 1 / loss # instead of downsteam score use the LM loss
logger.info("""Pruning: original score: %f, threshold: %f""" , snake_case_ , original_score * args.masking_threshold )
snake_case = torch.ones_like(snake_case_ )
snake_case = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
snake_case = original_score
while current_score >= original_score * args.masking_threshold:
snake_case = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
snake_case = float("""Inf""" )
snake_case = head_importance.view(-1 ).sort()[1]
if len(snake_case_ ) <= num_to_mask:
print("""BREAK BY num_to_mask""" )
break
# mask heads
snake_case = current_heads_to_mask[:num_to_mask]
logger.info("""Heads to mask: %s""" , str(current_heads_to_mask.tolist() ) )
snake_case = new_head_mask.view(-1 )
snake_case = 0.0
snake_case = new_head_mask.view_as(snake_case_ )
snake_case = new_head_mask.clone().detach()
print_ad_tensor(snake_case_ )
# Compute metric and head importance again
snake_case , snake_case , snake_case = compute_heads_importance(
snake_case_ , snake_case_ , snake_case_ , compute_entropy=snake_case_ , head_mask=snake_case_ )
snake_case = 1 / loss
logger.info(
"""Masking: current score: %f, remaining heads %d (%.1f percents)""" , snake_case_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info("""Final head mask""" )
print_ad_tensor(snake_case_ )
np.save(os.path.join(args.output_dir , """head_mask.npy""" ) , head_mask.detach().cpu().numpy() )
return head_mask
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] ) -> Union[str, Any]:
snake_case = datetime.now()
snake_case , snake_case , snake_case = compute_heads_importance(
snake_case_ , snake_case_ , snake_case_ , compute_entropy=snake_case_ , compute_importance=snake_case_ , head_mask=snake_case_ )
snake_case = 1 / loss
snake_case = datetime.now() - before_time
snake_case = sum(p.numel() for p in model.parameters() )
snake_case = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(snake_case_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(snake_case_ , snake_case_ ):
snake_case = [
v,
]
assert sum(len(snake_case_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(snake_case_ )
snake_case = sum(p.numel() for p in model.parameters() )
snake_case = datetime.now()
snake_case , snake_case , snake_case = compute_heads_importance(
snake_case_ , snake_case_ , snake_case_ , compute_entropy=snake_case_ , compute_importance=snake_case_ , head_mask=snake_case_ , actually_pruned=snake_case_ , )
snake_case = 1 / loss
snake_case = datetime.now() - before_time
logger.info(
"""Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)""" , snake_case_ , snake_case_ , pruned_num_params / original_num_params * 1_00 , )
logger.info("""Pruning: score with masking: %f score with pruning: %f""" , snake_case_ , snake_case_ )
logger.info("""Pruning: speed ratio (original timing / new timing): %f percents""" , original_time / new_time * 1_00 )
save_model(snake_case_ , args.output_dir )
def __lowerCamelCase ( ) -> str:
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--data_dir""" , default=snake_case_ , type=snake_case_ , required=snake_case_ , help="""The input data dir. Should contain the .tsv files (or other data files) for the task.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=snake_case_ , type=snake_case_ , required=snake_case_ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--output_dir""" , default=snake_case_ , type=snake_case_ , required=snake_case_ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
# Other parameters
parser.add_argument(
"""--config_name""" , default="""""" , type=snake_case_ , help="""Pretrained config name or path if not the same as model_name_or_path""" , )
parser.add_argument(
"""--tokenizer_name""" , default="""""" , type=snake_case_ , help="""Pretrained tokenizer name or path if not the same as model_name_or_path""" , )
parser.add_argument(
"""--cache_dir""" , default=snake_case_ , type=snake_case_ , help="""Where do you want to store the pre-trained models downloaded from s3""" , )
parser.add_argument(
"""--data_subset""" , type=snake_case_ , default=-1 , help="""If > 0: limit the data to a subset of data_subset instances.""" )
parser.add_argument(
"""--overwrite_output_dir""" , action="""store_true""" , help="""Whether to overwrite data in output directory""" )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
parser.add_argument(
"""--dont_normalize_importance_by_layer""" , action="""store_true""" , help="""Don\'t normalize importance score by layers""" )
parser.add_argument(
"""--dont_normalize_global_importance""" , action="""store_true""" , help="""Don\'t normalize all importance scores between 0 and 1""" , )
parser.add_argument(
"""--try_masking""" , action="""store_true""" , help="""Whether to try to mask head until a threshold of accuracy.""" )
parser.add_argument(
"""--masking_threshold""" , default=0.9 , type=snake_case_ , help="""masking threshold in term of metrics (stop masking when metric < threshold * original metric value).""" , )
parser.add_argument(
"""--masking_amount""" , default=0.1 , type=snake_case_ , help="""Amount to heads to masking at each masking step.""" )
parser.add_argument("""--metric_name""" , default="""acc""" , type=snake_case_ , help="""Metric to use for head masking.""" )
parser.add_argument(
"""--max_seq_length""" , default=1_28 , type=snake_case_ , help=(
"""The maximum total input sequence length after WordPiece tokenization. \n"""
"""Sequences longer than this will be truncated, sequences shorter padded."""
) , )
parser.add_argument("""--batch_size""" , default=1 , type=snake_case_ , help="""Batch size.""" )
parser.add_argument("""--seed""" , type=snake_case_ , default=42 )
parser.add_argument("""--local_rank""" , type=snake_case_ , default=-1 , help="""local_rank for distributed training on gpus""" )
parser.add_argument("""--no_cuda""" , action="""store_true""" , help="""Whether not to use CUDA when available""" )
parser.add_argument("""--server_ip""" , type=snake_case_ , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=snake_case_ , default="""""" , help="""Can be used for distant debugging.""" )
snake_case = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=snake_case_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
snake_case = torch.device("""cuda""" if torch.cuda.is_available() and not args.no_cuda else """cpu""" )
snake_case = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
snake_case = torch.device("""cuda""" , args.local_rank )
snake_case = 1
torch.distributed.init_process_group(backend="""nccl""" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("""device: {} n_gpu: {}, distributed: {}""".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
snake_case = GPT2LMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
snake_case = nn.parallel.DistributedDataParallel(
snake_case_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=snake_case_ )
elif args.n_gpu > 1:
snake_case = nn.DataParallel(snake_case_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=snake_case_ )
torch.save(snake_case_ , os.path.join(args.output_dir , """run_args.bin""" ) )
logger.info("""Training/evaluation parameters %s""" , snake_case_ )
# Prepare dataset
snake_case = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.int64 ),
] )
snake_case = (torch.from_numpy(snake_case_ ),)
snake_case = TensorDataset(*snake_case_ )
snake_case = RandomSampler(snake_case_ )
snake_case = DataLoader(snake_case_ , sampler=snake_case_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(snake_case_ , snake_case_ , snake_case_ )
# Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
snake_case = mask_heads(snake_case_ , snake_case_ , snake_case_ )
prune_heads(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
if __name__ == "__main__":
main()
| 369 |
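
The pruning driver above follows a compute-importance -> mask -> prune pipeline. A minimal sketch of just the selection step (the (num_layers, num_heads) importance tensor and the per-layer thresholding rule are illustrative assumptions; the actual script masks heads iteratively until the metric degrades past masking_threshold, then prunes the masked heads):

import torch

def select_heads_to_prune(head_importance: torch.Tensor, threshold: float = 0.9) -> dict:
    """Map layer index -> head indices whose score falls below threshold * the layer's max."""
    to_prune = {}
    for layer, scores in enumerate(head_importance):
        cutoff = threshold * scores.max()
        heads = (scores < cutoff).nonzero(as_tuple=True)[0].tolist()
        if heads:
            to_prune[layer] = heads
    return to_prune

head_importance = torch.rand(12, 12)  # e.g. GPT-2 base: 12 layers x 12 heads
print(select_heads_to_prune(head_importance))  # a dict in the shape expected by model.prune_heads(...)
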
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
_SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
_SCREAMING_SNAKE_CASE = "sshleifer/student_marian_en_ro_6_1"
_SCREAMING_SNAKE_CASE = "sshleifer/tiny-mbart"
@require_torch
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def lowerCAmelCase ( self : int , __snake_case : List[str]=False , __snake_case : List[Any]=None , __snake_case : Optional[int]=True , __snake_case : Any=True , __snake_case : int=True , __snake_case : Tuple=True , )-> Tuple:
snake_case = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=__snake_case , num_train_epochs=1 , distributed=__snake_case , extra_args_str=__snake_case , predict_with_generate=__snake_case , do_train=__snake_case , do_eval=__snake_case , do_predict=__snake_case , )
snake_case = TrainerState.load_from_json(os.path.join(__snake_case , """trainer_state.json""" ) ).log_history
if not do_eval:
return
snake_case = [log for log in logs if """eval_loss""" in log.keys()]
snake_case = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
snake_case = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , __snake_case )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowerCAmelCase ( self : Tuple )-> int:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def lowerCAmelCase ( self : Union[str, Any] )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case )
@require_torch_multi_gpu
def lowerCAmelCase ( self : str )-> List[Any]:
self.run_seqaseq_quick(distributed=__snake_case )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : Any )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : int )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : int )-> str:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=__snake_case )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : Any )-> List[Any]:
self.run_seqaseq_quick(
distributed=__snake_case , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=__snake_case )
@require_apex
@require_torch_gpu
def lowerCAmelCase ( self : Tuple )-> Union[str, Any]:
# XXX: apex breaks the trainer if it's run twice, e.g. run_seq2seq.main() called from the same
# program, and it breaks other tests that run from the same pytest worker; therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu we will need to make a special test
#
# specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via a
# 2nd main() call it botches the future eval
#
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test a 2nd time - was getting `eval_loss: nan`
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def lowerCAmelCase ( self : List[str] , __snake_case : str )-> Optional[Any]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
snake_case = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
snake_case = experiments[experiment_id]
snake_case = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
snake_case = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__snake_case , extra_args_str=data["""extra_args_str"""] )
snake_case = len(re.findall(__snake_case , cl.err ) )
self.assertEqual(__snake_case , data["""n_matches"""] )
@slow
def lowerCAmelCase ( self : Tuple )-> List[Any]:
snake_case = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=10 , distributed=__snake_case , )
# Check metrics
snake_case = TrainerState.load_from_json(os.path.join(__snake_case , """trainer_state.json""" ) ).log_history
snake_case = [log for log in logs if """eval_loss""" in log.keys()]
snake_case = eval_metrics[0]
snake_case = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , __snake_case )
# test if do_predict saves generations and metrics
snake_case = os.listdir(__snake_case )
snake_case = {os.path.basename(__snake_case ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def lowerCAmelCase ( self : str )-> Any:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__snake_case : str ) -> Tuple[int, float]:
snake_case = """--skip_memory_metrics 0"""
snake_case = self.run_trainer(
max_len=1_28 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=1 , optim=__snake_case , distributed=__snake_case , extra_args_str=__snake_case , do_eval=__snake_case , do_predict=__snake_case , n_gpus_to_use=1 , )
# Check metrics
snake_case = TrainerState.load_from_json(Path(__snake_case , """trainer_state.json""" ) ).log_history
snake_case = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
snake_case = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
snake_case = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
snake_case , snake_case , snake_case = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
snake_case , snake_case , snake_case = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
snake_case = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
snake_case = gpu_peak_mem_orig + gpu_alloc_mem_orig
snake_case = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
snake_case = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding`, which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# to 2 bytes, and the diff in optimizer memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb    25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB of total memory saved.
#
# Peak memory should be the same - the total should differ by about that same margin.
#
# After leaving a small margin to accommodate differences between gpus, let's check
# that we have at least 120MB in savings.
snake_case = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__snake_case , __snake_case , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
__snake_case , __snake_case , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
__snake_case , __snake_case , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def lowerCAmelCase ( self : int , __snake_case : int , __snake_case : str , __snake_case : int , __snake_case : float = 3e-3 , __snake_case : str = "adafactor" , __snake_case : bool = False , __snake_case : str = None , __snake_case : int = 0 , __snake_case : bool = True , __snake_case : bool = True , __snake_case : bool = True , __snake_case : bool = True , __snake_case : int = None , )-> Dict:
snake_case = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
snake_case = self.get_auto_remove_tmp_dir()
snake_case = f'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__snake_case )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__snake_case )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
snake_case = f'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__snake_case )}
'''.split()
snake_case = """
--do_predict
""".split()
snake_case = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
snake_case = get_gpu_count()
snake_case = get_torch_dist_unique_port()
snake_case = f'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
snake_case = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__snake_case , env=self.get_env() )
else:
snake_case = ["""run_translation.py"""] + args
with patch.object(__snake_case , """argv""" , __snake_case ):
main()
return output_dir
| 3 | 0 |
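
The bitsandbytes assertions above rest on back-of-the-envelope arithmetic. A standalone restatement of the estimate (parameter counts come from the test's own comment; 8 vs 2 bytes per parameter are the assumed sizes of Adam's two fp32 moments vs their 8-bit counterparts):

quantizable_params_m = 25  # 54M total minus 29M of nn.Embedding kept in fp32
adam_mb = quantizable_params_m * 8  # two fp32 moments per param: ~200 MB
bnb_mb = quantizable_params_m * 2   # two int8 moments per param:  ~50 MB
print(f"expected optimizer-state saving: ~{adam_mb - bnb_mb} MB")  # ~150 MB, hence the 120 MB floor
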
class Node:
    """A binary search tree node."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        """Insert val into the subtree rooted at this node."""
        if val < self.val:
            if self.left is None:
                self.left = Node(val)
            else:
                self.left.insert(val)
        elif val > self.val:
            if self.right is None:
                self.right = Node(val)
            else:
                self.right.insert(val)
        else:
            # an equal value overwrites the stored value, so duplicates are dropped
            self.val = val


def inorder(root, res):
    """Recurse over the tree, appending values to res in sorted order."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort arr by inserting its elements into a BST and traversing it in order."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
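
Two behaviours of this tree sort are worth noting: duplicates are dropped, because an equal value merely overwrites the node it matches, and already-sorted input degenerates the tree into a linked list, giving O(n^2) worst-case time. A quick check of the duplicate behaviour:

print(tree_sort([3, 1, 3, 2]))  # [1, 2, 3] - the repeated 3 is absorbed, not emitted twice
print(tree_sort([]))            # [] - empty input is returned unchanged
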
| 39 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_a = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = AlbertTokenizer
UpperCamelCase__ = AlbertTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = True
def UpperCamelCase ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = 'this is a test'
_UpperCAmelCase = 'this is a test'
return input_text, output_text
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '▁eloquent' )
self.assertEqual(len(UpperCAmelCase ) , 3_0000 )
def UpperCamelCase ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def UpperCamelCase ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = 'I was born in 92000, and this is falsé.'
_UpperCAmelCase = tokenizer.tokenize(UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(UpperCAmelCase )
_UpperCAmelCase = rust_tokenizer.encode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase , keep_accents=UpperCAmelCase )
_UpperCAmelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase , ['▁this', '▁is', '▁a', '▁test'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [48, 25, 21, 1289] )
_UpperCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] )
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = AlbertTokenizer(UpperCAmelCase )
_UpperCAmelCase = tokenizer.encode('sequence builders' )
_UpperCAmelCase = tokenizer.encode('multi-sequence build' )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
| 39 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Any = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {
"""facebook/levit-128S""": """https://huggingface.co/facebook/levit-128S/resolve/main/config.json""",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''levit'''
def __init__( self : List[Any] , snake_case__ : int=2_2_4 , snake_case__ : Dict=3 , snake_case__ : Optional[int]=3 , snake_case__ : Dict=2 , snake_case__ : Dict=1 , snake_case__ : Tuple=1_6 , snake_case__ : Optional[Any]=[1_2_8, 2_5_6, 3_8_4] , snake_case__ : Optional[int]=[4, 8, 1_2] , snake_case__ : List[str]=[4, 4, 4] , snake_case__ : Optional[Any]=[1_6, 1_6, 1_6] , snake_case__ : List[str]=0 , snake_case__ : Dict=[2, 2, 2] , snake_case__ : List[Any]=[2, 2, 2] , snake_case__ : Dict=0.02 , **snake_case__ : int , ):
'''simple docstring'''
super().__init__(**snake_case__ )
UpperCAmelCase__ : List[str] = image_size
UpperCAmelCase__ : Any = num_channels
UpperCAmelCase__ : List[Any] = kernel_size
UpperCAmelCase__ : List[str] = stride
UpperCAmelCase__ : Tuple = padding
UpperCAmelCase__ : Union[str, Any] = hidden_sizes
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : Optional[Any] = depths
UpperCAmelCase__ : List[str] = key_dim
UpperCAmelCase__ : Optional[Any] = drop_path_rate
UpperCAmelCase__ : Dict = patch_size
UpperCAmelCase__ : Optional[int] = attention_ratio
UpperCAmelCase__ : List[Any] = mlp_ratio
UpperCAmelCase__ : Dict = initializer_range
UpperCAmelCase__ : Union[str, Any] = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =version.parse('''1.11''' )
@property
def __a ( self : Dict ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __a ( self : Optional[int] ):
'''simple docstring'''
return 1e-4
| 298 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =XLMTokenizer
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Dict ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase__ : Optional[int] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
UpperCAmelCase__ : Any = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
UpperCAmelCase__ : Tuple = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
UpperCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(snake_case__ ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(snake_case__ ) )
def __a ( self : Union[str, Any] , snake_case__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : str = "lower newer"
UpperCAmelCase__ : Optional[Any] = "lower newer"
return input_text, output_text
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = XLMTokenizer(self.vocab_file , self.merges_file )
UpperCAmelCase__ : List[Any] = "lower"
UpperCAmelCase__ : Any = ["low", "er</w>"]
UpperCAmelCase__ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
UpperCAmelCase__ : Optional[Any] = tokens + ["<unk>"]
UpperCAmelCase__ : List[Any] = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ )
@slow
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" )
UpperCAmelCase__ : str = tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ )
UpperCAmelCase__ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ )
UpperCAmelCase__ : Any = tokenizer.build_inputs_with_special_tokens(snake_case__ )
UpperCAmelCase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 298 | 1 |
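
The fixture above is a minimal BPE setup: "lower" segments into "low" + "er</w>" because exactly those merges exist in the toy merges file. A self-contained sketch of how ranked merges drive that segmentation (a simplified greedy loop, not the tokenizer's actual implementation):

def bpe_segment(word, merges):
    """Repeatedly apply the best-ranked adjacent merge until none applies."""
    symbols = list(word[:-1]) + [word[-1] + "</w>"]  # mark the word boundary
    ranks = {pair: i for i, pair in enumerate(merges)}
    while True:
        pairs = [(ranks.get(p, float("inf")), i) for i, p in enumerate(zip(symbols, symbols[1:]))]
        rank, i = min(pairs, default=(float("inf"), -1))
        if rank == float("inf"):
            return symbols
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]

merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]  # mirrors the fixture's merges file
print(bpe_segment("lower", merges))  # ['low', 'er</w>'], matching the test's expected tokens
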
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
snake_case : str = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json'''
with io.open(filename, '''r''', encoding='''utf-8''') as f:
snake_case : Union[str, Any] = json.load(f)
@require_torch
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
return FSMTTokenizer.from_pretrained(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
model = FSMTForConditionalGeneration.from_pretrained(_lowerCamelCase ).to(torch_device )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['''en-ru''', 26.0],
['''ru-en''', 22.0],
['''en-de''', 22.0],
['''de-en''', 29.0],
] )
@slow
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ):
# note: this test is not testing the best performance since it only evaluates a small batch,
# but it should be enough to detect a regression in the output quality
a :Tuple = F'''facebook/wmt19-{pair}'''
a :Optional[Any] = self.get_tokenizer(_lowerCamelCase )
a :str = self.get_model(_lowerCamelCase )
a :Optional[int] = bleu_data[pair]['''src''']
a :Optional[Any] = bleu_data[pair]['''tgt''']
a :str = tokenizer(_lowerCamelCase , return_tensors='''pt''' , truncation=_lowerCamelCase , padding='''longest''' ).to(_lowerCamelCase )
a :Optional[Any] = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
a :str = tokenizer.batch_decode(
_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
a :int = calculate_bleu(_lowerCamelCase , _lowerCamelCase )
print(_lowerCamelCase )
self.assertGreaterEqual(scores['''bleu'''] , _lowerCamelCase )
| 94 |
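
The test above gates each language pair on a BLEU floor computed by the repo's calculate_bleu helper. A rough sketch of the scoring step using sacrebleu directly (assuming, as that helper does, corpus-level BLEU over detokenized strings; the sentences are made up):

from sacrebleu.metrics import BLEU

hypotheses = ["The cat sits on the mat.", "He reads a book."]
references = [["The cat sat on the mat.", "He is reading a book."]]  # one reference stream

score = BLEU().corpus_score(hypotheses, references)
print(round(score.score, 4))  # a single corpus-level number, comparable to the thresholds above
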
'''simple docstring'''
import heapq
import sys
import numpy as np
UpperCamelCase = tuple[int, int]
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ) -> str:
'''simple docstring'''
A: Any = []
A: int = set()
def _snake_case ( self : Optional[Any] ) -> int:
'''simple docstring'''
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def _snake_case ( self : List[str] ) -> List[Any]:
'''simple docstring'''
return len(self.elements ) == 0
def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any ) -> List[Any]:
'''simple docstring'''
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(SCREAMING_SNAKE_CASE_ )
else:
# update
# print("update", item)
A: Optional[int] = []
pri, x = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
pri, x = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ) -> Any:
'''simple docstring'''
if item in self.set:
self.set.remove(SCREAMING_SNAKE_CASE_ )
A: str = []
pro, x = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
pro, x = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _snake_case ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
return self.elements[0][1]
def _snake_case ( self : int ) -> Union[str, Any]:
'''simple docstring'''
priority, item = heapq.heappop(self.elements )
self.set.remove(SCREAMING_SNAKE_CASE_ )
return (priority, item)
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Union[str, Any]:
# euclidean distance
A: List[str] = np.array(__lowercase )
A: Optional[int] = np.array(__lowercase )
return np.linalg.norm(a - b )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> int:
# integer division by time variable
return consistent_heuristic(__lowercase , __lowercase ) // t
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase ) -> Optional[Any]:
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]:
A: int = g_function[start] + Wa * heuristics[i](__lowercase , __lowercase )
return ans
def do_something(back_pointer, goal, start) -> None:
    grid = np.chararray((n, n) )
    for i in range(n ):
        for j in range(n ):
            grid[i][j] = '''*'''

    for i in range(n ):
        for j in range(n ):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = '''#'''

    grid[0][(n - 1)] = '''-'''
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = '''-'''
        x = back_pointer[x]
    grid[(n - 1)][0] = '''-'''

    for i in range(n ):
        for j in range(n ):
            if (i, j) == (0, n - 1):
                print(grid[i][j] , end=''' ''' )
                print('''<-- End position''' , end=''' ''' )
            else:
                print(grid[i][j] , end=''' ''' )
        print()
    print('''^''' )
    print('''Start position''' )
    print()
    print('''# is an obstacle''' )
    print('''- is the path taken by algorithm''' )
    print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
    x = back_pointer[goal]
    while x != start:
        print(x , end=''' ''' )
        x = back_pointer[x]
    print(x )
    sys.exit()
def SCREAMING_SNAKE_CASE( __lowercase ) -> Optional[Any]:
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Union[str, Any]:
for itera in range(__lowercase ):
open_list[itera].remove_element(__lowercase )
# print("s", s)
# print("j", j)
x, y = s
A: Optional[Any] = (x - 1, y)
A: str = (x + 1, y)
A: List[Any] = (x, y + 1)
A: int = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(__lowercase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(__lowercase )
A: int = -1
A: int = float('''inf''' )
if valid(__lowercase ) and g_function[neighbours] > g_function[s] + 1:
A: List[str] = g_function[s] + 1
A: List[str] = s
if neighbours not in close_list_anchor:
open_list[0].put(__lowercase , key(__lowercase , 0 , __lowercase , __lowercase ) )
if neighbours not in close_list_inad:
for var in range(1 , __lowercase ):
if key(__lowercase , __lowercase , __lowercase , __lowercase ) <= Wa * key(
__lowercase , 0 , __lowercase , __lowercase ):
open_list[j].put(
__lowercase , key(__lowercase , __lowercase , __lowercase , __lowercase ) )
def SCREAMING_SNAKE_CASE( ) -> Tuple:
A: str = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
UpperCamelCase = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
UpperCamelCase = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
UpperCamelCase = make_common_ground()
UpperCamelCase = blocks_blk
# hyper parameters
UpperCamelCase = 1
UpperCamelCase = 1
UpperCamelCase = 20
UpperCamelCase = 3 # one consistent and two other inconsistent
# start and end destination
UpperCamelCase = (0, 0)
UpperCamelCase = (n - 1, n - 1)
UpperCamelCase = 1
def SCREAMING_SNAKE_CASE( __lowercase , __lowercase , __lowercase ) -> int:
A: int = {start: 0, goal: float('''inf''' )}
A: Union[str, Any] = {start: -1, goal: -1}
A: List[Any] = []
A: Union[str, Any] = set()
for i in range(__lowercase ):
open_list.append(PriorityQueue() )
open_list[i].put(__lowercase , key(__lowercase , __lowercase , __lowercase , __lowercase ) )
A: list[int] = []
A: list[int] = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , __lowercase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(__lowercase , __lowercase , __lowercase )
else:
A , A: Union[str, Any] = open_list[i].top_show()
visited.add(__lowercase )
expand_state(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , )
close_list_inad.append(__lowercase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(__lowercase , __lowercase , __lowercase )
else:
A: Union[str, Any] = open_list[0].top_show()
visited.add(__lowercase )
expand_state(
__lowercase , 0 , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , )
close_list_anchor.append(__lowercase )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(__lowercase ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 319 | 0 |
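
The admission rule at the heart of the multi-heuristic search above is small enough to state on its own: a queue ordered by an inadmissible heuristic may be expanded only while its best key stays within Wa times the anchor queue's best key. A standalone restatement with hypothetical numbers:

def key_value(g, h, w1=1.0):
    """Inflated f-value that orders an open list: g + w1 * h."""
    return g + w1 * h

def may_expand_inadmissible(anchor_min, inad_min, w2=1.0):
    """Mirrors `open_list[i].minkey() <= Wa * open_list[0].minkey()` above."""
    return inad_min <= w2 * anchor_min

print(may_expand_inadmissible(anchor_min=10.0, inad_min=9.5))   # True: use the greedier queue
print(may_expand_inadmissible(anchor_min=10.0, inad_min=14.0))  # False: fall back to the anchor
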
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path) -> None:
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 350 |
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Return the expected number of distinct colours among num_picked drawn balls,
    formatted to nine decimal places."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
| 342 | 0 |
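
The closed form in solution follows from linearity of expectation: each of the 7 colours appears in a 20-ball draw with probability 1 - C(60, 20) / C(70, 20), where C(60, 20) counts the draws that avoid that colour's 10 balls entirely. A Monte Carlo cross-check of the same quantity:

import math
import random

def simulate(num_picked=20, trials=100_000):
    """Estimate the expected number of distinct colours by sampling without replacement."""
    balls = [colour for colour in range(7) for _ in range(10)]  # 70 balls, 10 per colour
    seen = sum(len(set(random.sample(balls, num_picked))) for _ in range(trials))
    return seen / trials

exact = 7 * (1 - math.comb(60, 20) / math.comb(70, 20))
print(f"exact={exact:.9f} simulated~{simulate():.3f}")  # both land near 6.8187
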
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
_a = logging.getLogger()
def get_results(output_dir)-> dict:
    """simple docstring"""
    results = {}
    path = os.path.join(output_dir , 'all_results.json' )
    if os.path.exists(path ):
        with open(path , 'r' ) as f:
            results = json.load(f )
    else:
        raise ValueError(F"""can't find {path}""" )
    return results
_a = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
def UpperCamelCase ( self ):
"""simple docstring"""
import xla_spawn
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = F"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(UpperCAmelCase , 'argv' , UpperCAmelCase ):
_UpperCAmelCase = time()
xla_spawn.main()
_UpperCAmelCase = time()
_UpperCAmelCase = get_results(UpperCAmelCase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def UpperCamelCase ( self ):
"""simple docstring"""
import xla_spawn
_UpperCAmelCase = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
with patch.object(UpperCAmelCase , 'argv' , UpperCAmelCase ):
xla_spawn.main()
| 39 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def __A ( __lowerCAmelCase , __lowerCAmelCase=False )-> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
_UpperCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False )-> List[str]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
_UpperCAmelCase = ''
else:
_UpperCAmelCase = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_UpperCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
_UpperCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[
: config.hidden_size, :
]
_UpperCAmelCase = in_proj_bias[: config.hidden_size]
_UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
_UpperCAmelCase = in_proj_bias[-config.hidden_size :]
def __A ( __lowerCAmelCase )-> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> int:
"""simple docstring"""
_UpperCAmelCase = dct.pop(__lowerCAmelCase )
_UpperCAmelCase = val
def __A ( )-> str:
"""simple docstring"""
_UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_UpperCAmelCase = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True )-> List[str]:
"""simple docstring"""
_UpperCAmelCase = ViTConfig()
# patch_size
if model_name[-1] == "8":
_UpperCAmelCase = 8
# set labels if required
if not base_model:
_UpperCAmelCase = 1_000
_UpperCAmelCase = 'huggingface/label-files'
_UpperCAmelCase = 'imagenet-1k-id2label.json'
_UpperCAmelCase = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='dataset' ) , 'r' ) )
_UpperCAmelCase = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
_UpperCAmelCase = 384
_UpperCAmelCase = 1_536
_UpperCAmelCase = 12
_UpperCAmelCase = 6
# load original model from torch hub
_UpperCAmelCase = torch.hub.load('facebookresearch/dino:main' , __lowerCAmelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
_UpperCAmelCase = original_model.state_dict()
if base_model:
remove_classification_head_(__lowerCAmelCase )
_UpperCAmelCase = create_rename_keys(__lowerCAmelCase , base_model=__lowerCAmelCase )
for src, dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
if base_model:
_UpperCAmelCase = ViTModel(__lowerCAmelCase , add_pooling_layer=__lowerCAmelCase ).eval()
else:
_UpperCAmelCase = ViTForImageClassification(__lowerCAmelCase ).eval()
model.load_state_dict(__lowerCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor
_UpperCAmelCase = ViTImageProcessor()
_UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' )
_UpperCAmelCase = encoding['pixel_values']
_UpperCAmelCase = model(__lowerCAmelCase )
if base_model:
_UpperCAmelCase = original_model(__lowerCAmelCase )
assert torch.allclose(__lowerCAmelCase , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
_UpperCAmelCase = original_model(__lowerCAmelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCAmelCase , outputs.logits , atol=1E-3 )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
_a = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 39 | 1 |
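
read_in_q_k_v above exists because timm/DINO checkpoints store attention as one fused qkv projection of shape (3 * hidden, hidden), while the HF ViT layout expects three separate linear layers. A minimal shape-level sketch of the split (the hidden size here is a toy value; ViT-Base uses 768):

import torch

hidden = 4
qkv_weight = torch.randn(3 * hidden, hidden)  # fused projection as stored in the checkpoint
qkv_bias = torch.randn(3 * hidden)

q_w, k_w, v_w = qkv_weight[:hidden], qkv_weight[hidden : 2 * hidden], qkv_weight[-hidden:]
q_b, k_b, v_b = qkv_bias[:hidden], qkv_bias[hidden : 2 * hidden], qkv_bias[-hidden:]

# Each slice becomes the weight/bias of one nn.Linear(hidden, hidden) in the converted model.
assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)
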
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase, or PascalCase when use_pascal is True."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
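
A few calls make the use_pascal switch concrete (note that the camel-case path deliberately leaves the first word untouched):

print(snake_to_camel_case("some_random_string"))                   # someRandomString
print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString
print(snake_to_camel_case("nounderscores"))                        # nounderscores - nothing to capitalize
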
| 205 |
"""simple docstring"""
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm for the greatest common divisor."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    """Print both implementations on a few sample inputs."""
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
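
Both variants run in O(log min(a, b)), since the second argument at least halves across any two consecutive steps. A quick agreement check against the standard library:

import math
import random

for _ in range(5):
    a, b = random.randint(1, 10_000), random.randint(1, 10_000)
    assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)
print("both implementations agree with math.gcd")
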
| 205 | 1 |
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def lowerCAmelCase_ ( snake_case_=None ):
if subparsers is not None:
_A : Tuple = subparsers.add_parser("""env""" )
else:
_A : Optional[int] = argparse.ArgumentParser("""Accelerate env command""" )
parser.add_argument(
"""--config_file""",default=_lowerCamelCase,help="""The config file to use for the default values in the launching script.""" )
if subparsers is not None:
parser.set_defaults(func=_lowerCamelCase )
return parser
def lowerCAmelCase_ ( snake_case_ ):
_A : int = torch.__version__
_A : Optional[Any] = torch.cuda.is_available()
_A : Union[str, Any] = is_xpu_available()
_A : Optional[int] = is_npu_available()
_A : Optional[Any] = """Not found"""
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(_lowerCamelCase ):
_A : List[str] = load_config_from_file(args.config_file ).to_dict()
_A : Optional[int] = {
"""`Accelerate` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Numpy version""": np.__version__,
"""PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
"""PyTorch XPU available""": str(_lowerCamelCase ),
"""PyTorch NPU available""": str(_lowerCamelCase ),
"""System RAM""": f'''{psutil.virtual_memory().total / 1024 ** 3:.2f} GB''',
}
if pt_cuda_available:
_A : List[Any] = torch.cuda.get_device_name()
print("""\nCopy-and-paste the text below in your GitHub issue\n""" )
print("""\n""".join([f'''- {prop}: {val}''' for prop, val in info.items()] ) )
print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" )
_A : List[str] = (
"""\n""".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(_lowerCamelCase,_lowerCamelCase )
else f'''\t{accelerate_config}'''
)
print(_lowerCamelCase )
_A : List[Any] = accelerate_config
return info
def lowerCAmelCase_ ( ):
_A : Any = env_command_parser()
_A : Any = parser.parse_args()
env_command(_lowerCamelCase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 26 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCamelCase_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""DPTFeatureExtractor"""]
UpperCamelCase_ = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase_ = {
"""vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""},
"""tokenizer_file""": {
"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"""
},
}
lowerCamelCase_ = {"""mobilebert-uncased""": 5_1_2}
lowerCamelCase_ = {}
class a_ ( a_ ):
'''simple docstring'''
__a: Optional[Any] = VOCAB_FILES_NAMES
__a: Tuple = PRETRAINED_VOCAB_FILES_MAP
__a: List[str] = PRETRAINED_INIT_CONFIGURATION
__a: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a: Optional[int] = MobileBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
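# --- Added example (hedged) ---
# Usage sketch for the tokenizer defined above; fetching the checkpoint needs
# network access, so the demo is guarded to run only when executed directly.
if __name__ == "__main__":
    tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
    enc = tok("first segment", "second segment")
    # Pair inputs become [CLS] A [SEP] B [SEP] with 0/1 token_type_ids, matching
    # build_inputs_with_special_tokens / create_token_type_ids_from_sequences.
    print(enc["input_ids"])
    print(enc["token_type_ids"])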
| 14 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file( tmp_path ):
    filename = tmp_path / 'file.csv'
    data = textwrap.dedent(
        '\\n header1,header2\n 1,2\n 10,20\n ' )
    with open(filename , 'w' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def malformed_csv_file( tmp_path ):
    filename = tmp_path / 'malformed_file.csv'
    data = textwrap.dedent(
        '\\n header1,header2\n 1,2\n 10,20,\n ' )
    with open(filename , 'w' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_image( tmp_path , image_file ):
    filename = tmp_path / 'csv_with_image.csv'
    data = textwrap.dedent(
        F'''\
        image
        {image_file}
        ''' )
    with open(filename , 'w' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_label( tmp_path ):
    filename = tmp_path / 'csv_with_label.csv'
    data = textwrap.dedent(
        '\\n label\n good\n bad\n good\n ' )
    with open(filename , 'w' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_int_list( tmp_path ):
    filename = tmp_path / 'csv_with_int_list.csv'
    data = textwrap.dedent(
        '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' )
    with open(filename , 'w' ) as f:
        f.write(data )
    return str(filename )
def test_csv_generate_tables_raises_error_with_malformed_csv( csv_file , malformed_csv_file , caplog ):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match='Error tokenizing data' ):
        for _ in generator:
            pass
    assert any(
        record.levelname == 'ERROR'
        and 'Failed to read file' in record.message
        and os.path.basename(malformed_csv_file ) in record.message
        for record in caplog.records )
@require_pil
def test_csv_cast_image( csv_file_with_image ):
    with open(csv_file_with_image , encoding='utf-8' ) as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
    generator = csv._generate_tables([[csv_file_with_image]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('image' ).type == Image()()
    generated_content = pa_table.to_pydict()['image']
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label( csv_file_with_label ):
    with open(csv_file_with_label , encoding='utf-8' ) as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
    generator = csv._generate_tables([[csv_file_with_label]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
    generated_content = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=['good', 'bad'] ).str2int(label ) for label in labels]
def test_csv_convert_int_list( csv_file_with_int_list ):
    csv = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda x : [int(i ) for i in x.split()]} )
    generator = csv._generate_tables([[csv_file_with_int_list]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
    generated_content = pa_table.to_pydict()['int_list']
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
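# --- Added example (hedged) ---
# The user-facing path these tests exercise: loading a CSV through the public
# datasets API with typed features. `path` is a placeholder for a one-column
# label CSV like the fixture above.
def demo_load_typed_csv(path):
    from datasets import load_dataset

    features = Features({"label": ClassLabel(names=["good", "bad"])})
    ds = load_dataset("csv", data_files={"train": path}, features=features)
    return ds["train"].features["label"].names  # ['good', 'bad']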
| 14 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments( BenchmarkArguments ):
    '''simple docstring'''
    deprecated_args = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
    def __init__( self , **kwargs ) -> None:
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self , positive_arg , not kwargs.pop(deprecated_arg ) )
                logger.warning(
                    F"""{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"""
                    F""" {positive_arg}={kwargs[positive_arg]}""" )
        self.torchscript = kwargs.pop('''torchscript''' , self.torchscript )
        self.torch_xla_tpu_print_metrics = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
        self.fp16_opt_level = kwargs.pop('''fp16_opt_level''' , self.fp16_opt_level )
        super().__init__(**kwargs )
    torchscript: bool = field(default=False , metadata={"help": "Trace the models using torchscript"} )
    torch_xla_tpu_print_metrics: bool = field(default=False , metadata={"help": "Print Xla/PyTorch tpu metrics"} )
    fp16_opt_level: str = field(
        default="O1" , metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        } , )
    @cached_property
    def _setup_devices( self ) -> Tuple["torch.device", int]:
        requires_backends(self , ['''torch'''] )
        logger.info('''PyTorch: setting up devices''' )
        if not self.cuda:
            device = torch.device('''cpu''' )
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
    @property
    def is_tpu( self ) -> bool:
        return is_torch_tpu_available() and self.tpu
    @property
    def device_idx( self ) -> int:
        requires_backends(self , ['''torch'''] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()
    @property
    def device( self ) -> "torch.device":
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[0]
    @property
    def n_gpu( self ) -> int:
        requires_backends(self , ['''torch'''] )
        return self._setup_devices[1]
    @property
    def is_gpu( self ) -> bool:
        return self.n_gpu > 0
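# --- Added example (hedged) ---
# Standalone sketch of the deprecated-flag rewriting done in __init__ above:
# a legacy "no_cuda=True" kwarg becomes its positive form "cuda=False".
def _normalize_deprecated(kwargs, deprecated_args=("no_cuda", "no_tpu")):
    for deprecated_arg in deprecated_args:
        if deprecated_arg in kwargs:
            positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
            kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
    return kwargs

assert _normalize_deprecated({"no_cuda": True}) == {"cuda": False}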
| 198 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp( self ) -> None:
        super().setUp()
        vocab = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **kwargs ) -> BlenderbotSmallTokenizer:
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ) -> Tuple[str, str]:
        input_text = '''adapt act apte'''
        output_text = '''adapt act apte'''
        return input_text, output_text
    def test_full_blenderbot_small_tokenizer( self ) -> None:
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''adapt act apte'''
        bpe_tokens = ['''adapt''', '''act''', '''ap@@''', '''te''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def test_special_tokens_small_tok( self ) -> None:
        tok = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
        assert tok('''sam''' ).input_ids == [1384]
        src_text = '''I am a small frog.'''
        encoded = tok([src_text] , padding=False , truncation=True )['''input_ids''']
        decoded = tok.batch_decode(encoded , skip_special_tokens=True , clean_up_tokenization_spaces=True )[0]
        assert src_text != decoded # I wish it did!
        assert decoded == "i am a small frog ."
    def test_empty_word_small_tok( self ) -> None:
        tok = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
        src_text = '''I am a small frog .'''
        src_text_dot = '''.'''
        encoded = tok(src_text )['''input_ids''']
        encoded_dot = tok(src_text_dot )['''input_ids''']
        assert encoded[-1] == encoded_dot[0]
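# --- Added example (hedged) ---
# Usage sketch matching the integration tests above; requires network access
# to download the facebook/blenderbot-90M checkpoint.
def demo_blenderbot_small_roundtrip():
    tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
    ids = tok("I am a small frog.").input_ids
    # Decoding is lossy: the checkpoint lowercases and re-spaces punctuation.
    return tok.decode(ids, skip_special_tokens=True)  # "i am a small frog ."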
| 198 | 1 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest( SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config( self ,**kwargs ) -> dict:
        '''simple docstring'''
        config = {
            """num_train_timesteps""": 1000,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """clip_sample""": True,
        }
        config.update(**kwargs )
        return config
    def full_loop( self ,**config ) -> "torch.Tensor":
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for t in scheduler.timesteps:
            residual = model(sample ,t )
            sample = scheduler.step(residual ,t ,sample ,eta ).prev_sample
        return sample
    def test_timesteps( self ) -> None:
        '''simple docstring'''
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_steps_offset( self ) -> None:
        '''simple docstring'''
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1 )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps ,torch.LongTensor([801, 601, 401, 201, 1] ) )
    def test_betas( self ) -> None:
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start ,beta_end=beta_end )
    def test_schedules( self ) -> None:
        '''simple docstring'''
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ) -> None:
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_clip_sample( self ) -> None:
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_timestep_spacing( self ) -> None:
        '''simple docstring'''
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing )
    def test_rescale_betas_zero_snr( self ) -> None:
        '''simple docstring'''
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr )
    def test_thresholding( self ) -> None:
        '''simple docstring'''
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True ,prediction_type=prediction_type ,sample_max_value=threshold ,)
    def test_time_indices( self ) -> None:
        '''simple docstring'''
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t )
    def test_inference_steps( self ) -> None:
        '''simple docstring'''
        for t, num_inference_steps in zip([1, 10, 50] ,[10, 50, 500] ):
            self.check_over_forward(time_step=t ,num_inference_steps=num_inference_steps )
    def test_eta( self ) -> None:
        '''simple docstring'''
        for t, eta in zip([1, 10, 49] ,[0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=t ,eta=eta )
    def test_variance( self ) -> None:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420 ,400 ) - 0.14771 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980 ,960 ) - 0.32460 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ,486 ) - 0.00979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ,998 ) - 0.02 ) ) < 1e-5
    def test_batch_step_no_noise( self ) -> None:
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps )
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3] ,dim=0 )
        timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1 ,per_sample_batch )
        residual = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,eta )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
        assert abs(result_sum.item() - 1147.7904 ) < 1e-2
        assert abs(result_mean.item() - 0.4982 ) < 1e-3
    def test_full_loop_no_noise( self ) -> None:
        '''simple docstring'''
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 172.0067 ) < 1e-2
        assert abs(result_mean.item() - 0.223967 ) < 1e-3
    def test_full_loop_with_v_prediction( self ) -> None:
        '''simple docstring'''
        sample = self.full_loop(prediction_type="""v_prediction""" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 52.5302 ) < 1e-2
        assert abs(result_mean.item() - 0.0684 ) < 1e-3
    def test_full_loop_with_set_alpha_to_one( self ) -> None:
        '''simple docstring'''
        sample = self.full_loop(set_alpha_to_one=True ,beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 149.8295 ) < 1e-2
        assert abs(result_mean.item() - 0.1951 ) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one( self ) -> None:
        '''simple docstring'''
        sample = self.full_loop(set_alpha_to_one=False ,beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 149.0784 ) < 1e-2
        assert abs(result_mean.item() - 0.1941 ) < 1e-3
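# --- Added example (hedged) ---
# Minimal denoising loop mirroring full_loop() above, written against the
# public scheduler API; the zero "model output" is a stand-in for a real UNet.
def demo_ddim_parallel_loop():
    scheduler = DDIMParallelScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        noise_pred = torch.zeros_like(sample)  # placeholder for model(sample, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample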
| 193 |
import json
import sys
def format_json_to_md( input_json_file , output_md_file ) -> None:
    '''simple docstring'''
    with open(input_json_file , encoding="""utf-8""" ) as f:
        results = json.load(f )
    output_md = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
    for benchmark_name in sorted(results ):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("""/""" )[-1]
        output_md.append(F"""### Benchmark: {benchmark_file_name}""" )
        title = """| metric |"""
        lines = """|--------|"""
        value = """| new / old (diff) |"""
        for metric_name in sorted(benchmark_res ):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["""new"""]
            old_val = metric_vals.get("""old""" , None )
            dif_val = metric_vals.get("""diff""" , None )
            val_str = F""" {new_val:f}""" if isinstance(new_val , (int, float) ) else """None"""
            if old_val is not None:
                val_str += F""" / {old_val:f}""" if isinstance(old_val , (int, float) ) else "None"
            if dif_val is not None:
                val_str += F""" ({dif_val:f})""" if isinstance(dif_val , (int, float) ) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("""</details>""" )
    with open(output_md_file , """w""" , encoding="""utf-8""" ) as f:
        f.writelines("""\n""".join(output_md ) )
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
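# --- Added example (hedged) ---
# Shape of the input this script expects: one entry per benchmark file, each
# metric carrying new/old/diff values. Names and numbers are illustrative.
#
#     {
#       "benchmarks/bench_a.json": {
#         "load_time": {"new": 0.91, "old": 1.20, "diff": -0.29}
#       }
#     }
#
# Invocation (script name illustrative):
#     python format_json_to_md.py results.json results.md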
| 193 | 1 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser( subparsers=None ) -> "argparse.ArgumentParser":
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser('''env''' )
    else:
        parser = argparse.ArgumentParser('''Accelerate env command''' )
    parser.add_argument(
        '''--config_file''' ,default=None ,help='''The config file to use for the default values in the launching script.''' )
    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def env_command( args ) -> dict:
    '''simple docstring'''
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = '''Not found'''
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
        '''`Accelerate` version''': version,
        '''Platform''': platform.platform(),
        '''Python version''': platform.python_version(),
        '''Numpy version''': np.__version__,
        '''PyTorch version (GPU?)''': F"""{pt_version} ({pt_cuda_available})""",
        '''PyTorch XPU available''': str(pt_xpu_available ),
        '''PyTorch NPU available''': str(pt_npu_available ),
        '''System RAM''': F"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""",
    }
    if pt_cuda_available:
        info['''GPU type'''] = torch.cuda.get_device_name()
    print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
    print('''\n'''.join([F"""- {prop}: {val}""" for prop, val in info.items()] ) )
    print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
    accelerate_config_str = (
        '''\n'''.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config ,dict )
        else F"""\t{accelerate_config}"""
    )
    print(accelerate_config_str )
    info['''`Accelerate` configs'''] = accelerate_config
    return info
def main() -> int:
    '''simple docstring'''
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
    return 0
if __name__ == "__main__":
raise SystemExit(main())
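# --- Added usage note (hedged) ---
# This module backs the `accelerate env` CLI subcommand. Equivalent
# programmatic call, using the functions defined above:
#
#     args = env_command_parser().parse_args([])
#     info = env_command(args)  # prints platform/version info, returns the dict
#
# Shell usage: accelerate env --config_file path/to/config.yaml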
| 179 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (('num_inference_steps', 25),)
    def get_scheduler_config( self ,**kwargs ) -> dict:
        '''simple docstring'''
        config = {
            'num_train_timesteps': 1_0_0_0,
            'beta_start': 0.0_001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
            'prediction_type': 'epsilon',
            'thresholding': False,
            'sample_max_value': 1.0,
            'algorithm_type': 'dpmsolver++',
            'solver_type': 'midpoint',
            'lambda_min_clipped': -float('inf' ),
            'variance_type': None,
        }
        config.update(**kwargs )
        return config
def snake_case__ ( self : str,lowercase_ : Optional[Any]=0,**lowercase_ : Any )-> List[Any]:
'''simple docstring'''
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop('num_inference_steps',lowercase_ )
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config(**lowercase_ )
A__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
A__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
A__ = scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
A__ = dummy_past_residuals[: new_scheduler.config.solver_order]
A__ , A__ = sample, sample
for t in range(lowercase_,time_step + scheduler.config.solver_order + 1 ):
A__ = scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample
A__ = new_scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self : List[str] )-> List[Any]:
'''simple docstring'''
pass
def snake_case__ ( self : Tuple,lowercase_ : Union[str, Any]=0,**lowercase_ : Union[str, Any] )-> Union[str, Any]:
'''simple docstring'''
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop('num_inference_steps',lowercase_ )
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config()
A__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
A__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
A__ = scheduler_class.from_pretrained(lowercase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residual (must be after setting timesteps)
A__ = dummy_past_residuals[: new_scheduler.config.solver_order]
A__ = scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample
A__ = new_scheduler.step(lowercase_,lowercase_,lowercase_,**lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self ,scheduler=None ,**config ) -> "torch.Tensor":
        '''simple docstring'''
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 1_0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample ,t )
            sample = scheduler.step(residual ,t ,sample ).prev_sample
        return sample
def snake_case__ ( self : Any )-> str:
'''simple docstring'''
A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
A__ = 5_0
A__ = self.dummy_model()
A__ = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
A__ = model(lowercase_,lowercase_ )
A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample
A__ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 0.2_574 ) < 1E-3
def snake_case__ ( self : Optional[Any] )-> List[Any]:
'''simple docstring'''
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def snake_case__ ( self : int )-> Optional[Any]:
'''simple docstring'''
A__ = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
A__ = self.full_loop(scheduler=lowercase_ )
A__ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 0.2_791 ) < 1E-3
A__ = DEISMultistepScheduler.from_config(scheduler.config )
A__ = DPMSolverMultistepScheduler.from_config(scheduler.config )
A__ = UniPCMultistepScheduler.from_config(scheduler.config )
A__ = DPMSolverSinglestepScheduler.from_config(scheduler.config )
A__ = self.full_loop(scheduler=lowercase_ )
A__ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 0.2_791 ) < 1E-3
def snake_case__ ( self : Tuple )-> Any:
'''simple docstring'''
self.check_over_configs(thresholding=lowercase_ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowercase_,prediction_type=lowercase_,sample_max_value=lowercase_,algorithm_type='dpmsolver++',solver_order=lowercase_,solver_type=lowercase_,)
def snake_case__ ( self : List[Any] )-> int:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def snake_case__ ( self : Dict )-> List[Any]:
'''simple docstring'''
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowercase_,solver_type=lowercase_,prediction_type=lowercase_,algorithm_type=lowercase_,)
A__ = self.full_loop(
solver_order=lowercase_,solver_type=lowercase_,prediction_type=lowercase_,algorithm_type=lowercase_,)
assert not torch.isnan(lowercase_ ).any(), "Samples have nan numbers"
def snake_case__ ( self : Optional[int] )-> Tuple:
'''simple docstring'''
self.check_over_configs(lower_order_final=lowercase_ )
self.check_over_configs(lower_order_final=lowercase_ )
def snake_case__ ( self : Tuple )-> Optional[int]:
'''simple docstring'''
self.check_over_configs(lambda_min_clipped=-float('inf' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def snake_case__ ( self : Optional[Any] )-> Tuple:
'''simple docstring'''
self.check_over_configs(variance_type=lowercase_ )
self.check_over_configs(variance_type='learned_range' )
def snake_case__ ( self : str )-> Any:
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=lowercase_,time_step=0 )
def snake_case__ ( self : Tuple )-> Tuple:
'''simple docstring'''
A__ = self.full_loop()
A__ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 0.2_791 ) < 1E-3
def snake_case__ ( self : Any )-> Union[str, Any]:
'''simple docstring'''
A__ = self.full_loop(use_karras_sigmas=lowercase_ )
A__ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 0.2_248 ) < 1E-3
def snake_case__ ( self : Union[str, Any] )-> Tuple:
'''simple docstring'''
A__ = self.full_loop(prediction_type='v_prediction' )
A__ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 0.1_453 ) < 1E-3
def snake_case__ ( self : Tuple )-> int:
'''simple docstring'''
A__ = self.full_loop(prediction_type='v_prediction',use_karras_sigmas=lowercase_ )
A__ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_mean.item() - 0.0_649 ) < 1E-3
def snake_case__ ( self : List[Any] )-> int:
'''simple docstring'''
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(thresholding=lowercase_,dynamic_thresholding_ratio=0 )
A__ = scheduler_class(**lowercase_ )
A__ = 1_0
A__ = self.dummy_model()
A__ = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowercase_ )
for i, t in enumerate(scheduler.timesteps ):
A__ = model(lowercase_,lowercase_ )
A__ = scheduler.step(lowercase_,lowercase_,lowercase_ ).prev_sample
assert sample.dtype == torch.float16
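# --- Added example (hedged) ---
# Typical way this scheduler is consumed: swapped into a diffusers pipeline.
# The checkpoint id is illustrative and downloading it needs network access.
def demo_swap_scheduler():
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)
    return pipe("an astronaut riding a horse", num_inference_steps=25).images[0]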
| 7 | 0 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''owlvit_text_model'''
    def __init__( self ,vocab_size=4_9_4_0_8 ,hidden_size=5_1_2 ,intermediate_size=2_0_4_8 ,num_hidden_layers=1_2 ,num_attention_heads=8 ,max_position_embeddings=1_6 ,hidden_act="quick_gelu" ,layer_norm_eps=1E-5 ,attention_dropout=0.0 ,initializer_range=0.02 ,initializer_factor=1.0 ,pad_token_id=0 ,bos_token_id=4_9_4_0_6 ,eos_token_id=4_9_4_0_7 ,**kwargs ,) -> None:
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained( cls ,pretrained_model_name_or_path ,**kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("""model_type""" ) == "owlvit":
            config_dict = config_dict["""text_config"""]
        if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict ,**kwargs )
class OwlViTVisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''owlvit_vision_model'''
    def __init__( self ,hidden_size=7_6_8 ,intermediate_size=3_0_7_2 ,num_hidden_layers=1_2 ,num_attention_heads=1_2 ,num_channels=3 ,image_size=7_6_8 ,patch_size=3_2 ,hidden_act="quick_gelu" ,layer_norm_eps=1E-5 ,attention_dropout=0.0 ,initializer_range=0.02 ,initializer_factor=1.0 ,**kwargs ,) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained( cls ,pretrained_model_name_or_path ,**kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("""model_type""" ) == "owlvit":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict ,**kwargs )
class OwlViTConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''owlvit'''
    is_composition = True
    def __init__( self ,text_config=None ,vision_config=None ,projection_dim=5_1_2 ,logit_scale_init_value=2.65_92 ,return_dict=True ,**kwargs ,) -> None:
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
        self.text_config = OwlViTTextConfig(**text_config )
        self.vision_config = OwlViTVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
    @classmethod
    def from_pretrained( cls ,pretrained_model_name_or_path ,**kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path ,**kwargs )
        if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict ,**kwargs )
    @classmethod
    def from_text_vision_configs( cls ,text_config ,vision_config ,**kwargs ) -> "OwlViTConfig":
        config_dict = {}
        config_dict["""text_config"""] = text_config.to_dict()
        config_dict["""vision_config"""] = vision_config.to_dict()
        return cls.from_dict(config_dict ,**kwargs )
    def to_dict( self ) -> "Dict[str, Any]":
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
class OwlViTOnnxConfig( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""input_ids""", {0: """batch""", 1: """sequence"""}),
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""attention_mask""", {0: """batch""", 1: """sequence"""}),
            ] )
    @property
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""logits_per_image""", {0: """batch"""}),
                ("""logits_per_text""", {0: """batch"""}),
                ("""text_embeds""", {0: """batch"""}),
                ("""image_embeds""", {0: """batch"""}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1E-4
    def generate_dummy_inputs( self ,processor ,batch_size = -1 ,seq_length = -1 ,framework = None ,) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer ,batch_size=batch_size ,seq_length=seq_length ,framework=framework )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor ,batch_size=batch_size ,framework=framework )
        return {**text_input_dict, **image_input_dict}
    @property
    def default_onnx_opset( self ) -> int:
        return 1_4
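# --- Added example (hedged) ---
# Sketch of composing the config classes defined above; defaults are used for
# any argument not shown, as the __init__ log messages note. Guarded so it
# only runs when the module is executed directly.
if __name__ == "__main__":
    text_cfg = OwlViTTextConfig(vocab_size=49408)
    vision_cfg = OwlViTVisionConfig(patch_size=32)
    cfg = OwlViTConfig.from_text_vision_configs(text_cfg, vision_cfg, projection_dim=512)
    print(cfg.text_config.vocab_size, cfg.vision_config.patch_size)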
| 77 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class TextClassification( TaskTemplate ):
    '''simple docstring'''
    task: str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''text''': Value('''string''' )} )
    label_schema: ClassVar[Features] = Features({'''labels''': ClassLabel} )
    text_column: str = "text"
    label_column: str = "labels"
    def align_with_features( self ,features ) -> "TextClassification":
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] ,ClassLabel ):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['''labels'''] = features[self.label_column]
        task_template.__dict__['''label_schema'''] = label_schema
        return task_template
    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
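# --- Added example (hedged) ---
# Sketch of applying the template above to concrete features; align_with_features
# returns a copy whose label schema carries the dataset's actual class names.
# Guarded so it only runs when executed directly (the relative imports above
# assume this file lives inside the datasets package).
if __name__ == "__main__":
    features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
    template = TextClassification(text_column="text", label_column="labels")
    aligned = template.align_with_features(features)
    print(aligned.label_schema["labels"].names)  # ['neg', 'pos']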
| 77 | 1 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders( accelerator: Accelerator , dataset: DatasetDict , train_idxs: List[int] , valid_idxs: List[int] , batch_size: int = 16 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs ),
            "validation": dataset["train"].select(valid_idxs ),
            "test": dataset["validation"],
        } )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    test_dataloader = DataLoader(
        tokenized_datasets["test"] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader, test_dataloader
def training_function( config , args ):
    """simple docstring"""
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue" , "mrpc" )
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    metric = evaluate.load("glue" , "mrpc" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows ) , datasets["train"]["label"] )
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds ):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator , datasets , train_idxs , valid_idxs , batch_size , )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() , lr=lr )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
                metric.add_batch(
                    predictions=predictions , references=references , )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"epoch {epoch}:" , eval_metric )
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions , dim=0 ) )
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references , dim=0 )
    preds = torch.stack(test_predictions , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    test_metric = metric.compute(predictions=preds , references=test_references )
    accelerator.print("Average test metrics from all folds:" , test_metric )
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    # New Code #
    parser.add_argument("--num_folds" , type=int , default=3 , help="The number of splits to perform across the dataset" )
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
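# --- Added example (hedged) ---
# Standalone sketch of the cross-validation core above: StratifiedKFold yields
# index splits that get_fold_dataloaders() turns into per-fold train/val sets.
#
#     labels = np.array([0, 1] * 50)
#     kfold = StratifiedKFold(n_splits=3)
#     for fold, (train_idxs, valid_idxs) in enumerate(kfold.split(np.zeros(len(labels)), labels)):
#         print(fold, len(train_idxs), len(valid_idxs))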
| 184 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
TGT = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def test_disaggregated_scores_are_determinstic():
    """simple docstring"""
    no_aggregation = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=["rouge2", "rougeL"] )
    assert isinstance(no_aggregation , defaultdict )
    no_aggregation_just_r2 = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=["rouge2"] )
    assert (
        pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"] ).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    """simple docstring"""
    k = "rougeLsum"
    score = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=[k] )[k]
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=[k] )[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    """simple docstring"""
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=k )
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=k )
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    """simple docstring"""
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred , tgt , newline_sep=True ) == calculate_rouge(pred , tgt , newline_sep=False )
def test_pegasus_newline():
    """simple docstring"""
    pred = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    tgt = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    prev_score = calculate_rouge(pred , tgt , rouge_keys=["rougeLsum"] , newline_sep=False )["rougeLsum"]
    new_score = calculate_rouge(pred , tgt , rouge_keys=["rougeLsum"] )["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    """simple docstring"""
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro" )
    metrics = calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) )
    assert isinstance(metrics , dict )
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=False )
    assert isinstance(metrics_default_dict , defaultdict )
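# --- Added example (hedged) ---
# The calculate_rouge helper under test builds on the rouge_score package (an
# assumption here); a sketch of the underlying call, useful when the local
# seq2seq utils module is not on the path.
def demo_rouge_score():
    from rouge_score import rouge_scorer

    scorer = rouge_scorer.RougeScorer(["rouge2", "rougeLsum"], use_stemmer=True)
    scores = scorer.score(target="the cat sat on the mat", prediction="a cat was on the mat")
    return scores["rouge2"].fmeasure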
| 184 | 1 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char( cp ):
"""simple docstring"""
if (
(cp >= 0x4_e00 and cp <= 0x9_fff)
or (cp >= 0x3_400 and cp <= 0x4_dbf) #
or (cp >= 0x20_000 and cp <= 0x2a_6df) #
or (cp >= 0x2a_700 and cp <= 0x2b_73f) #
or (cp >= 0x2b_740 and cp <= 0x2b_81f) #
or (cp >= 0x2b_820 and cp <= 0x2c_eaf) #
or (cp >= 0xf_900 and cp <= 0xf_aff)
or (cp >= 0x2f_800 and cp <= 0x2f_a1f) #
): #
return True
return False
def is_chinese( word ):
    """simple docstring"""
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
def get_chinese_word( tokens ):
    """simple docstring"""
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol( bert_tokens , chinese_word_set ):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start, end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            l = min(end - start , max_word_len )
            for i in range(l , 1 , -1 ):
                whole_word = """""".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = """##""" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref( lines , ltp_tokenizer , bert_tokenizer ):
    """simple docstring"""
    ltp_res = []
    for i in range(0 , len(lines ) , 1_00 ):
        res = ltp_tokenizer.seg(lines[i : i + 1_00] )[0]
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 1_00 ):
        res = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=True , truncation=True , max_length=5_12 )
        bert_res.extend(res["""input_ids"""] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main( args ):
    """simple docstring"""
    with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp ) # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
        data = [json.dumps(ref ) + """\n""" for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
)
parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
    args = parser.parse_args()
main(args)
| 364 |
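A small worked example of the add_sub_symbol routine above; the tokens and the LTP word set are made up for illustration.

bert_tokens = ["我", "喜", "欢", "自", "然"]
ltp_words = {"喜欢", "自然"}

marked = add_sub_symbol(bert_tokens, ltp_words)
print(marked)  # ['我', '喜', '##欢', '自', '##然']
# positions of "##" continuations are what prepare_ref writes to the ref file
ref_id = [i for i, t in enumerate(marked) if t.startswith("##")]
print(ref_id)  # [2, 4]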
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300  # assumed attribute; the original assignment target was mangled away
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason="""MRA does not output attentions""" )
    def test_attention_outputs(self):
return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 331 | 0 |
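A hedged sketch of the inference pattern the slow tests above exercise; the checkpoint name and expected hidden-state shape are taken directly from the tests.

import torch
from transformers import MraModel

model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
model.eval()
input_ids = torch.arange(256).unsqueeze(0)  # (batch=1, seq_len=256)
with torch.no_grad():
    hidden = model(input_ids).last_hidden_state
print(hidden.shape)  # torch.Size([1, 256, 768]) per the expected shape above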
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    """Configuration for X-MOD, an XLM-R-style encoder with language-specific adapter modules."""

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 184 |
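A minimal usage sketch for the config class above, assuming a transformers release that ships X-MOD; the adapter fields shown are exactly the ones __init__ stores.

from transformers import XmodConfig

config = XmodConfig(
    adapter_reduction_factor=2,
    languages=("en_XX", "de_DE"),
    default_language="en_XX",
)
print(config.model_type, config.languages)  # xmod ['en_XX', 'de_DE']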
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)
def __call__( self : Any ):
'''simple docstring'''
return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode example into a format for Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode example image file into image data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten into bytes/path values."""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image to bytes, using the image's own format if Pillow can round-trip it, else PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    """Encode a list of image-like objects into a list of {"bytes", "path"} dicts (function name reconstructed)."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| 184 | 1 |
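A sketch of the encode path above: encode_example normalizes every input kind into the {"bytes", "path"} struct stored in Arrow. Assumes the datasets library and Pillow are installed.

import numpy as np
from datasets import Image

feature = Image()

# numpy array -> PNG bytes via PIL (the encode_np_array branch above)
arr = np.zeros((4, 4, 3), dtype=np.uint8)
encoded = feature.encode_example(arr)
print(encoded["path"], type(encoded["bytes"]))  # None <class 'bytes'>

# plain path -> stored by reference, bytes left empty
print(feature.encode_example("cat.png"))  # {'path': 'cat.png', 'bytes': None}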
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(f'{sum(compute_truncated_primes(1_1)) = }')
| 183 |
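A quick check of the helpers above on 3797, the classic two-sided truncatable prime from Project Euler 37.

print(is_prime(3797))             # True
print(list_truncated_nums(3797))  # [3797, 797, 379, 97, 37, 7, 3]
print(all(is_prime(n) for n in list_truncated_nums(3797)))  # True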
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 183 | 1 |
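A short usage sketch of the helper the tests above exercise, assuming the same positional signature (out_features, out_indices, stage_names): ask for stages by name and get the matching indices back.

from transformers.utils.backbone_utils import get_aligned_output_features_output_indices

out_features, out_indices = get_aligned_output_features_output_indices(
    ["stage2", "stage4"], None, ["stage1", "stage2", "stage3", "stage4"]
)
print(out_features, out_indices)  # ['stage2', 'stage4'] [1, 3]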
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)


@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9 | 225 |
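A hedged sketch of the memory-saving calls the slow tests above rely on; the checkpoint name comes straight from the tests, and the offloading flags trade speed for peak GPU memory.

import torch
from diffusers import StableUnCLIPPipeline

pipe = StableUnCLIPPipeline.from_pretrained(
    "fusing/stable-unclip-2-1-l", torch_dtype=torch.float16
)
pipe.enable_attention_slicing()       # compute attention in slices to cap peak memory
pipe.enable_sequential_cpu_offload()  # keep only the active submodule on the GPU
image = pipe("anime turtle", prior_num_inference_steps=2, num_inference_steps=2).images[0]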
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 225 | 1 |
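The _LazyModule wiring above defers the heavy torch/vision imports until an attribute is first accessed. A tiny self-contained sketch of the same pattern, using only the standard library; the class and dict names here are illustrative, not the transformers implementation.

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve exported names on first access instead of at import time."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported name back to the module that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module = importlib.import_module(self._name_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ runs once per name
        return value


# "json" is only imported when JSONDecodeError is first touched
lazy = LazyModule("demo", {"json": ["JSONDecodeError"]})
print(lazy.JSONDecodeError)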
import numpy as np
# identifiers below are reconstructed from the internal call sites; the originals were mangled
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) of `letter` in the Polybius square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-based (row, column) of the Polybius square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        """Encipher `message`; spaces are dropped and 'j' is folded into 'i'."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        """Decipher a message produced by `encode`."""
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
| 350 |
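A round-trip check of the cipher above. Note that encode folds "j" into "i", so any "j" in the plaintext does not survive a round trip.

cipher = BifidCipher()
ct = cipher.encode("test message")
print(ct)
print(cipher.decode(ct))  # "testmessage" (spaces are dropped by encode)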
# function and variable names below are reconstructed from the call sites in the
# __main__ block; the originals were mangled
def apply_table(inp, table):
    """Permute `inp` according to the 1-based positions listed in `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit block in S-box `s`: outer bits select the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 330 | 0 |
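A tiny worked example of the bit-string helpers above: the permutation tables list 1-based positions of the input to keep, so P4 = [2, 4, 3, 1] reorders "abcd" to "bdca".

print(apply_table("abcd", [2, 4, 3, 1]))  # bdca
print(left_shift("10000"))                # 00001
print(xor("1010", "0110"))                # 1100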
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file) | 8 |
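A small illustration of the JSON shape the function above expects, inferred from its reads (benchmark -> metric -> new/old/diff); file names here are made up.

import json

example = {
    "benchmarks/map.json": {
        "time_s": {"new": 1.25, "old": 1.50, "diff": -0.25},
    }
}
with open("results.json", "w", encoding="utf-8") as f:
    json.dump(example, f)

format_json_to_md("results.json", "results.md")
# results.md now holds a collapsible <details> block with one table row like:
# | metric | time_s |  ->  | new / old (diff) | 1.250000 / 1.500000 (-0.250000) |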
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>

Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Downloads and caches the prompt from a repo and returns it contents (if necessary)."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
| 300 | 0 |
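A sketch of the two behaviors above; the literal prompt string and agent name are made up for illustration. Anything containing whitespace passes through untouched, while bare repo IDs are fetched from the Hub.

literal = download_prompt("Translate: <<task>>", agent_name="demo-agent")
assert literal == "Translate: <<task>>"  # whitespace => treated as the prompt itself

# With None, the default dataset repo's template is downloaded and cached
# (commented out here because it hits the network):
# template = download_prompt(None, agent_name="demo-agent", mode="chat")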
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class _a :
"""simple docstring"""
def __init__( self : Dict )->int:
_UpperCAmelCase = {}
def lowercase__ ( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : int , __UpperCamelCase : Dict=1 )->str:
if self.graph.get(snake_case_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_UpperCAmelCase = [[w, v]]
if not self.graph.get(snake_case_ ):
_UpperCAmelCase = []
def lowercase__ ( self : Union[str, Any] )->str:
return list(self.graph )
def lowercase__ ( self : List[str] , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple )->Tuple:
if self.graph.get(snake_case_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(snake_case_ )
def lowercase__ ( self : Any , __UpperCamelCase : List[Any]=-2 , __UpperCamelCase : Union[str, Any]=-1 )->int:
if s == d:
return []
_UpperCAmelCase = []
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
stack.append(snake_case_ )
visited.append(snake_case_ )
_UpperCAmelCase = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(snake_case_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(snake_case_ ) != 0:
_UpperCAmelCase = stack[len(snake_case_ ) - 1]
else:
_UpperCAmelCase = ss
            # check if we have reached the starting point
if len(snake_case_ ) == 0:
return visited
def lowercase__ ( self : str , __UpperCamelCase : List[Any]=-1 )->List[str]:
if c == -1:
_UpperCAmelCase = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(snake_case_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
_UpperCAmelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(snake_case_ , snake_case_ , 1 )
def lowercase__ ( self : str , __UpperCamelCase : Any=-2 )->Any:
_UpperCAmelCase = deque()
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
d.append(snake_case_ )
visited.append(snake_case_ )
while d:
_UpperCAmelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowercase__ ( self : str , __UpperCamelCase : List[Any] )->Optional[int]:
_UpperCAmelCase = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def lowercase__ ( self : List[Any] , __UpperCamelCase : Union[str, Any] )->str:
return len(self.graph[u] )
def lowercase__ ( self : Optional[int] , __UpperCamelCase : int=-2 )->Any:
_UpperCAmelCase = []
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
stack.append(snake_case_ )
visited.append(snake_case_ )
_UpperCAmelCase = s
_UpperCAmelCase = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(snake_case_ ) != 0:
_UpperCAmelCase = stack[len(snake_case_ ) - 1]
else:
_UpperCAmelCase = ss
# check if se have reached the starting point
if len(snake_case_ ) == 0:
return sorted_nodes
def lowercase__ ( self : int )->str:
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(snake_case_ )
visited.append(snake_case_ )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(snake_case_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(snake_case_ ) != 0:
_UpperCAmelCase = stack[len(snake_case_ ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(snake_case_ )
_UpperCAmelCase = s
_UpperCAmelCase = ss
# check if se have reached the starting point
if len(snake_case_ ) == 0:
return list(snake_case_ )
def lowercase__ ( self : Union[str, Any] )->Optional[int]:
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(snake_case_ )
visited.append(snake_case_ )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(snake_case_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(snake_case_ ) != 0:
_UpperCAmelCase = stack[len(snake_case_ ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(snake_case_ )
_UpperCAmelCase = s
_UpperCAmelCase = ss
# check if se have reached the starting point
if len(snake_case_ ) == 0:
return False
def lowercase__ ( self : str , __UpperCamelCase : List[str]=-2 , __UpperCamelCase : Optional[Any]=-1 )->List[str]:
_UpperCAmelCase = time()
self.dfs(snake_case_ , snake_case_ )
_UpperCAmelCase = time()
return end - begin
def lowercase__ ( self : str , __UpperCamelCase : Union[str, Any]=-2 )->List[str]:
_UpperCAmelCase = time()
self.bfs(snake_case_ )
_UpperCAmelCase = time()
return end - begin
class _a :
"""simple docstring"""
def __init__( self : Dict )->Optional[Any]:
_UpperCAmelCase = {}
def lowercase__ ( self : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[int]=1 )->List[Any]:
# check if the u exists
if self.graph.get(snake_case_ ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_UpperCAmelCase = [[w, v]]
# add the other way
if self.graph.get(snake_case_ ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
_UpperCAmelCase = [[w, u]]
def lowercase__ ( self : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str] )->int:
if self.graph.get(snake_case_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(snake_case_ )
# the other way round
if self.graph.get(snake_case_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(snake_case_ )
def lowercase__ ( self : str , __UpperCamelCase : int=-2 , __UpperCamelCase : List[str]=-1 )->int:
if s == d:
return []
_UpperCAmelCase = []
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
stack.append(snake_case_ )
visited.append(snake_case_ )
_UpperCAmelCase = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(snake_case_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(snake_case_ ) != 0:
_UpperCAmelCase = stack[len(snake_case_ ) - 1]
else:
_UpperCAmelCase = ss
# check if se have reached the starting point
if len(snake_case_ ) == 0:
return visited
def lowercase__ ( self : List[str] , __UpperCamelCase : List[Any]=-1 )->Tuple:
if c == -1:
_UpperCAmelCase = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(snake_case_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
_UpperCAmelCase = floor(random() * c ) + 1
if n != i:
self.add_pair(snake_case_ , snake_case_ , 1 )
def lowercase__ ( self : int , __UpperCamelCase : Optional[int]=-2 )->int:
_UpperCAmelCase = deque()
_UpperCAmelCase = []
if s == -2:
_UpperCAmelCase = list(self.graph )[0]
d.append(snake_case_ )
visited.append(snake_case_ )
while d:
_UpperCAmelCase = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def lowercase__ ( self : List[Any] , __UpperCamelCase : Optional[Any] )->Optional[Any]:
return len(self.graph[u] )
def lowercase__ ( self : List[str] )->Dict:
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(snake_case_ )
visited.append(snake_case_ )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(snake_case_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(snake_case_ ) != 0:
_UpperCAmelCase = stack[len(snake_case_ ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(snake_case_ )
_UpperCAmelCase = s
_UpperCAmelCase = ss
# check if se have reached the starting point
if len(snake_case_ ) == 0:
return list(snake_case_ )
def lowercase__ ( self : Optional[int] )->Optional[int]:
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = list(self.graph )[0]
stack.append(snake_case_ )
visited.append(snake_case_ )
_UpperCAmelCase = -2
_UpperCAmelCase = []
_UpperCAmelCase = s
_UpperCAmelCase = False
_UpperCAmelCase = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_UpperCAmelCase = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_UpperCAmelCase = len(snake_case_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_UpperCAmelCase = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_UpperCAmelCase = True
if len(snake_case_ ) != 0:
_UpperCAmelCase = stack[len(snake_case_ ) - 1]
else:
_UpperCAmelCase = False
indirect_parents.append(snake_case_ )
_UpperCAmelCase = s
_UpperCAmelCase = ss
# check if se have reached the starting point
if len(snake_case_ ) == 0:
return False
def lowercase__ ( self : List[Any] )->Dict:
return list(self.graph )
def lowercase__ ( self : str , __UpperCamelCase : Optional[Any]=-2 , __UpperCamelCase : Dict=-1 )->Any:
_UpperCAmelCase = time()
self.dfs(snake_case_ , snake_case_ )
_UpperCAmelCase = time()
return end - begin
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any]=-2 )->Tuple:
_UpperCAmelCase = time()
self.bfs(snake_case_ )
_UpperCAmelCase = time()
return end - begin
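# --- Usage sketch (not part of the original module): exercises the classes above ---
if __name__ == "__main__":
    dg = DirectedGraph()
    dg.add_pair(0, 1)
    dg.add_pair(1, 2)
    dg.add_pair(2, 0)  # closes the cycle 0 -> 1 -> 2 -> 0
    print(dg.all_nodes())  # [0, 1, 2]
    print(dg.dfs())  # depth-first order starting from the first inserted node
    print(dg.in_degree(0))  # 1 (only the edge 2 -> 0 points at node 0)
    print(dg.has_cycle())  # True

    ug = Graph()
    ug.add_pair("a", "b", w=3)
    print(ug.degree("a"))  # 1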
| 352 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError('''Input value must be a \'int\' type''' )
return bin(_SCREAMING_SNAKE_CASE ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
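# --- Illustrative alternative (not part of the original file): Brian Kernighan's
# trick clears the lowest set bit on each iteration, so it loops once per set bit
# rather than once per binary digit.
def kernighan_count_setbits(a: int) -> int:
    count = 0
    while a:
        a &= a - 1  # drop the lowest set bit
        count += 1
    return count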
| 326 | 0 |
'''simple docstring'''

import json
import os
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """Returns the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}


class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """
    Constructs a Speech2Text2 tokenizer (BPE-based). Without a `merges_file` it can
    only be used for decoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)

        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
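# --- Decoding-only smoke test (not part of the original module; the tiny vocab and
# the temp path below are made up for illustration) ---
# import json, os, tempfile
#
# vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hel@@": 4, "lo": 5}
# vocab_path = os.path.join(tempfile.mkdtemp(), "vocab.json")
# with open(vocab_path, "w", encoding="utf-8") as f:
#     json.dump(vocab, f)
#
# tokenizer = Speech2Text2Tokenizer(vocab_path)  # no merges file -> decode-only
# print(tokenizer.convert_tokens_to_string(["hel@@", "lo"]))  # "hello"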
| 55 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )

        torch.manual_seed(0)
        # NOTE: the original boolean value for `clip_sample` was lost in this copy;
        # True is an assumption here.
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        # NOTE: the `upcast_attention` / `use_linear_projection` values were lost in
        # this copy; True/True are assumptions.
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"

        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]

        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)


@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9 | 55 | 1 |
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """
    :param graph: 2D adjacency matrix of edge weights (float("inf") means no edge)
    :param v: number of vertices
    :return: (dist, v) where dist holds the shortest distance between every pair
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
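    # --- Non-interactive usage sketch (not part of the original script) ---
    # inf = float("inf")
    # adjacency = [
    #     [0.0, 2.0, inf],
    #     [inf, 0.0, 3.0],
    #     [1.0, inf, 0.0],
    # ]
    # dist, _ = floyd_warshall(adjacency, 3)
    # # dist[0][2] == 5.0, going 0 -> 1 -> 2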
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0 | 235 |
from ...processing_utils import ProcessorMixin


class SpeechT5Processor(ProcessorMixin):
    r"""
    Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer
    into a single processor.
    """

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # temporarily swap the padded feature size so spectrogram labels pad correctly
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs) | 235 | 1 |
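# --- Usage sketch for the processor above (not part of the original module; the
# public microsoft/speecht5_tts checkpoint is assumed to be downloadable) ---
# from transformers import SpeechT5Processor
#
# processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
# inputs = processor(text="Hello, my dog is cute", return_tensors="pt")  # encoder inputs
# # additionally passing audio_target=<waveform>, sampling_rate=16000 yields `labels`
# # (spectrogram frames) and `decoder_attention_mask` for TTS training.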
'''simple docstring'''

import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging


logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())


LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}


def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]


def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
):
    """Make a student by copying alternating layers from a teacher, then save it to save_path."""
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)

    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers) | 331 |
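# --- Usage sketch (not part of the original script; the file name make_student.py,
# the fire flag syntax, and downloadability of the checkpoint are assumptions) ---
# student, e_copied, d_copied = create_student_by_copying_alternating_layers(
#     "facebook/bart-large-cnn", save_path="student_12_3", e=12, d=3
# )
# # or from the command line via fire:
# #   python make_student.py facebook/bart-large-cnn student_12_3 --e 12 --d 3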
'''simple docstring'''

import random
import unittest

import torch

from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        ) | 331 | 1 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
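# --- Usage sketch (not part of the original module) ---
# scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
# state = scheduler.create_state()
# state = scheduler.set_timesteps(state, num_inference_steps=50)
# for t in state.timesteps:
#     model_output = ...  # UNet prediction for `sample` at step t
#     output = scheduler.step(state, model_output, t, sample, key=jax.random.PRNGKey(0))
#     sample, state = output.prev_sample, output.state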
| 298 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" )
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained("google/mt5-small" )
UpperCAmelCase__ : Dict = tokenizer("Hello there" , return_tensors="tf" ).input_ids
UpperCAmelCase__ : Union[str, Any] = tokenizer("Hi I am" , return_tensors="tf" ).input_ids
UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ).loss
UpperCAmelCase__ : Optional[Any] = -tf.math.reduce_mean(snake_case__ ).numpy()
UpperCAmelCase__ : List[Any] = -21.22_8168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 298 | 1 |
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 24 |
import os

import pytest

from transformers.dynamic_module_utils import get_imports


TOP_LEVEL_IMPORT = "\nimport os\n"

IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"

DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"

TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"

TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"

MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"

EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"

GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"

MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"

MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
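# --- Direct illustration (not part of the original tests) ---
# `get_imports` reports unconditional top-level dependencies and skips imports
# guarded by try/except, which is why every case above parses to ["os"]:
#
# with open("example_module.py", "w") as f:
#     f.write(TOP_LEVEL_TRY_IMPORT)
# print(get_imports("example_module.py"))  # ["os"]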
| 24 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class __A ( a__ ):
_UpperCamelCase : Optional[Any] = 42
class __A ( a__ , a__ ):
@register_to_config
def __init__( self , a__ = 3 , a__ = 3 , a__ = ("DownEncoderBlock2D",) , a__ = ("UpDecoderBlock2D",) , a__ = (64,) , a__ = 1 , a__ = "silu" , a__ = 3 , a__ = 32 , a__ = 256 , a__ = 32 , a__ = None , a__ = 0.1_8_2_1_5 , a__ = "group" , ):
super().__init__()
# pass init params to Encoder
_lowerCAmelCase : Optional[Any] = Encoder(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , down_block_types=_lowerCamelCase , block_out_channels=_lowerCamelCase , layers_per_block=_lowerCamelCase , act_fn=_lowerCamelCase , norm_num_groups=_lowerCamelCase , double_z=_lowerCamelCase , )
_lowerCAmelCase : int = vq_embed_dim if vq_embed_dim is not None else latent_channels
_lowerCAmelCase : Tuple = nn.Convad(_lowerCamelCase , _lowerCamelCase , 1 )
_lowerCAmelCase : Dict = VectorQuantizer(_lowerCamelCase , _lowerCamelCase , beta=0.2_5 , remap=_lowerCamelCase , sane_index_shape=_lowerCamelCase )
_lowerCAmelCase : str = nn.Convad(_lowerCamelCase , _lowerCamelCase , 1 )
# pass init params to Decoder
_lowerCAmelCase : List[Any] = Decoder(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , up_block_types=_lowerCamelCase , block_out_channels=_lowerCamelCase , layers_per_block=_lowerCamelCase , act_fn=_lowerCamelCase , norm_num_groups=_lowerCamelCase , norm_type=_lowerCamelCase , )
@apply_forward_hook
def __A ( self , a__ , a__ = True ):
_lowerCAmelCase : Any = self.encoder(_lowerCamelCase )
_lowerCAmelCase : str = self.quant_conv(_lowerCamelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=_lowerCamelCase )
@apply_forward_hook
def __A ( self , a__ , a__ = False , a__ = True ):
if not force_not_quantize:
_lowerCAmelCase : Union[str, Any] = self.quantize(_lowerCamelCase )
else:
_lowerCAmelCase : Tuple = h
_lowerCAmelCase : int = self.post_quant_conv(_lowerCamelCase )
_lowerCAmelCase : List[Any] = self.decoder(_lowerCamelCase , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCamelCase )
def __A ( self , a__ , a__ = True ):
_lowerCAmelCase : List[str] = sample
_lowerCAmelCase : Dict = self.encode(_lowerCamelCase ).latents
_lowerCAmelCase : Optional[Any] = self.decode(_lowerCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_lowerCamelCase )
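# --- Usage sketch (not part of the original module) ---
# import torch
# from diffusers import VQModel
#
# model = VQModel()  # default config: 3-channel images, a single 64-channel block
# x = torch.randn(1, 3, 32, 32)
# with torch.no_grad():
#     reconstruction = model(x).sample
# assert reconstruction.shape == x.shape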
| 354 | """simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple

import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput


@dataclass
class TransformationModelOutput(ModelOutput):
    """Base class for text model outputs that also contains a projection of the hidden states."""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)

            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            ) | 126 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/bart-base""": 1024,
"""facebook/bart-large""": 1024,
"""facebook/bart-large-mnli""": 1024,
"""facebook/bart-large-cnn""": 1024,
"""facebook/bart-large-xsum""": 1024,
"""yjernite/bart_eli5""": 1024,
}
@lru_cache()
def A_ ( ):
SCREAMING_SNAKE_CASE_: List[Any] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
SCREAMING_SNAKE_CASE_: Optional[Any] = bs[:]
SCREAMING_SNAKE_CASE_: int = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_UpperCAmelCase )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE_: Any = [chr(_UpperCAmelCase ) for n in cs]
return dict(zip(_UpperCAmelCase , _UpperCAmelCase ) )
def get_pairs( word ):
    # Return the set of adjacent symbol pairs in a word (a tuple of symbols).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class BartTokenizer( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False) if isinstance(bos_token , str) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False) if isinstance(eos_token , str) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False) if isinstance(sep_token , str) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False) if isinstance(cls_token , str) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False) if isinstance(unk_token , str) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False) if isinstance(pad_token , str) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size( self ):
        return len(self.encoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked pair first.
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )
    def convert_tokens_to_string( self , tokens ):
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self , text , is_split_into_words = False , **kwargs ):
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
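# Added usage sketch (not part of the original file): a toy vocab/merges pair is
# enough to exercise the byte-level BPE pipeline above end to end. The toy token
# inventory below is an assumption chosen purely for this demo.
def _demo_bart_tokenizer() -> None:
    import tempfile
    vocab = {"<s>": 0, "</s>": 1, "<unk>": 2, "<pad>": 3, "<mask>": 4,
             "l": 5, "o": 6, "w": 7, "e": 8, "r": 9, "er": 10}
    merges = ["#version: 0.2", "e r"]
    with tempfile.TemporaryDirectory() as tmp:
        vocab_path = os.path.join(tmp, VOCAB_FILES_NAMES["vocab_file"])
        merges_path = os.path.join(tmp, VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_path, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab))
        with open(merges_path, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
        tokenizer = BartTokenizer(vocab_path, merges_path)
        # "e r" is the only merge rule, so "lower" splits into l/o/w + merged "er".
        assert tokenizer.tokenize("lower") == ["l", "o", "w", "er"]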
| 13 |
'''simple docstring'''
from typing import Any
class Node :
    def __init__( self , data ):
        self.data = data
        self.next = None
    def __repr__( self ):
        return F"""Node({self.data})"""
class LinkedList :
    def __init__( self ):
        self.head = None
    def __iter__( self ):
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__( self ):
        return sum(1 for _ in self )
    def __repr__( self ):
        return "->".join([str(item ) for item in self] )
    def __getitem__( self , index ):
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None
    def __setitem__( self , index , data ):
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
    def insert_tail( self , data ):
        self.insert_nth(len(self ) , data )
    def insert_head( self , data ):
        self.insert_nth(0 , data )
    def insert_nth( self , index , data ):
        if not 0 <= index <= len(self ):
            raise IndexError('list index out of range' )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list( self ):  # print every node data
        print(self )
    def delete_head( self ):
        return self.delete_nth(0 )
    def delete_tail( self ):  # delete from tail
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self , index = 0 ):
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError('List index out of range.' )
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty( self ):
        return self.head is None
    def reverse( self ):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list( ) -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list ) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10 ):
        assert len(linked_list ) == i
        linked_list.insert_nth(i , i + 1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 11 ) )
    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(0 , 12 ) )
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list ) == 9
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 10 ) )
    assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
    for i in range(0 , 9 ):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
    linked_list.reverse()
    assert str(linked_list ) == "->".join(str(i ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2( ) -> None:
    test_input = [
        -9,
        100,
        Node(77345112 ),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55_555,
        'Hello, world!',
        77.9,
        Node(10 ),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i )
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!' ) )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list )
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main( ) -> None:
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
    linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
    print('\nPrint list:' )
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
    linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nDelete head' )
    linked_list.delete_head()
    print('Delete tail' )
    linked_list.delete_tail()
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nReverse linked list' )
    linked_list.reverse()
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nString representation of linked list:' )
    print(linked_list )
    print('\nReading/changing Node data using indexing:' )
    print(f"""Element at Position 1: {linked_list[1]}""" )
    linked_list[1] = input('Enter New Value: ' ).strip()
    print('New list:' )
    print(linked_list )
    print(f"""length of linked_list is : {len(linked_list )}""" )
if __name__ == "__main__":
main()
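# Added illustrative check (not part of the original file): reverse() is an
# involution, so reversing twice restores the original order; each call walks
# the list once (O(n) time, O(1) extra space).
def _reverse_twice_demo() -> None:
    demo_list = LinkedList()
    for value in range(5 ):
        demo_list.insert_tail(value )
    before = str(demo_list )
    demo_list.reverse()
    demo_list.reverse()
    assert str(demo_list ) == before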
| 89 | 0 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch( xlm_checkpoint_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    chkpt = torch.load(xlm_checkpoint_path , map_location='cpu' )
    state_dict = chkpt['model']
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict['transformer.' + k] = v
    config = chkpt['params']
    config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
    vocab = chkpt['dico_word2id']
    vocab = {s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@' , '' ): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(two_levels_state_dict , pytorch_weights_dump_path )
    print(f"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(json.dumps(config , indent=2 ) + '\n' )
    print(f"""Save vocab file to {pytorch_vocab_dump_path}""" )
    with open(pytorch_vocab_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(json.dumps(vocab , indent=2 ) + '\n' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
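# Added usage note (illustrative; the paths are placeholders):
#
#   python this_script.py \
#       --xlm_checkpoint_path /path/to/mlm_en_2048.pth \
#       --pytorch_dump_folder_path /path/to/dump
#
# The script writes the weights, config.json and the vocab file into the dump
# folder, which can then be loaded with `from_pretrained(dump_folder)`.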
| 208 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class A( UpperCamelCase ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
        super().setUp()
        data_cached = cached_path(
            'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=True , )
        self.data_dir = f"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""
@slow
@require_torch_gpu
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
        MarianMTModel.from_pretrained(MARIAN_MODEL )
@slow
@require_torch_gpu
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = {
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
lowerCamelCase_ = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
lowerCamelCase_ = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
lowerCamelCase_ = bash_script.replace(A_ , str(A_ ) )
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
lowerCamelCase_ = f"""
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
""".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
lowerCamelCase_ = ['finetune.py'] + bash_script.split() + args
with patch.object(A_ , 'argv' , A_ ):
lowerCamelCase_ = argparse.ArgumentParser()
lowerCamelCase_ = pl.Trainer.add_argparse_args(A_ )
lowerCamelCase_ = SummarizationModule.add_model_specific_args(A_ , os.getcwd() )
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = main(A_ )
# Check metrics
lowerCamelCase_ = load_json(model.metrics_save_path )
lowerCamelCase_ = metrics['val'][0]
lowerCamelCase_ = metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , A_ )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
lowerCamelCase_ = os.listdir(A_ )
lowerCamelCase_ = [x for x in contents if x.endswith('.ckpt' )][0]
lowerCamelCase_ = os.path.join(args.output_dir , A_ )
lowerCamelCase_ = torch.load(A_ , map_location='cpu' )
lowerCamelCase_ = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowerCamelCase_ = {os.path.basename(A_ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class A( UpperCamelCase ):
'''simple docstring'''
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def a__ ( self : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = f"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
lowerCamelCase_ = {
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
lowerCamelCase_ = (
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
lowerCamelCase_ = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
lowerCamelCase_ = bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
lowerCamelCase_ = bash_script.replace(A_ , str(A_ ) )
lowerCamelCase_ = self.get_auto_remove_tmp_dir()
lowerCamelCase_ = bash_script.replace('--fp16' , '' )
lowerCamelCase_ = 6
lowerCamelCase_ = (
['distillation.py']
+ bash_script.split()
+ [
f"""--output_dir={output_dir}""",
'--gpus=1',
'--learning_rate=1e-3',
f"""--num_train_epochs={epochs}""",
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(A_ , 'argv' , A_ ):
lowerCamelCase_ = argparse.ArgumentParser()
lowerCamelCase_ = pl.Trainer.add_argparse_args(A_ )
lowerCamelCase_ = SummarizationDistiller.add_model_specific_args(A_ , os.getcwd() )
lowerCamelCase_ = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
lowerCamelCase_ = distill_main(A_ )
# Check metrics
lowerCamelCase_ = load_json(model.metrics_save_path )
lowerCamelCase_ = metrics['val'][0]
lowerCamelCase_ = metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , A_ )
# check lightning ckpt can be loaded and has a reasonable statedict
lowerCamelCase_ = os.listdir(A_ )
lowerCamelCase_ = [x for x in contents if x.endswith('.ckpt' )][0]
lowerCamelCase_ = os.path.join(args.output_dir , A_ )
lowerCamelCase_ = torch.load(A_ , map_location='cpu' )
lowerCamelCase_ = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
lowerCamelCase_ = {os.path.basename(A_ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 208 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state ):
    return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def test_gather(state ):
    tensor = create_tensor(state )
    gathered_tensor = gather(tensor )
    assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def test_gather_object(state ):
    obj = [state.process_index]
    gathered_obj = gather_object(obj )
    assert len(gathered_obj ) == state.num_processes, F'{gathered_obj}, {len(gathered_obj )} != {state.num_processes}'
    assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}'
def test_broadcast(state ):
    tensor = create_tensor(state )
    broadcasted_tensor = broadcast(tensor )
    assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
    assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def test_pad_across_processes(state ):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1 ).to(state.device )
    else:
        tensor = torch.arange(state.num_processes ).to(state.device )
    padded_tensor = pad_across_processes(tensor )
    assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def test_reduce_sum(state ):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , 'sum' )
    truth_tensor = torch.tensor([4.0, 6] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), F'{reduced_tensor} != {truth_tensor}'
def test_reduce_mean(state ):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , 'mean' )
    truth_tensor = torch.tensor([2.0, 3] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), F'{reduced_tensor} != {truth_tensor}'
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
def main( ):
    state = PartialState()
    state.print(F'State: {state}' )
    state.print('testing gather' )
    test_gather(state )
    state.print('testing gather_object' )
    test_gather_object(state )
    state.print('testing broadcast' )
    test_broadcast(state )
    state.print('testing pad_across_processes' )
    test_pad_across_processes(state )
    state.print('testing reduce_sum' )
    test_reduce_sum(state )
    state.print('testing reduce_mean' )
    test_reduce_mean(state )
if __name__ == "__main__":
main()
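# Added usage note: the asserts above only exercise the collective ops when more
# than one process is running, e.g. (command is illustrative)
#
#   accelerate launch --num_processes 2 this_script.py
#
# With two processes, create_tensor() yields [1., 2.] on rank 0 and [3., 4.] on
# rank 1, so gather() returns [1., 2., 3., 4.] everywhere, reduce(..., 'sum')
# returns [4., 6.] and reduce(..., 'mean') returns [2., 3.] on every rank.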
| 99 |
from math import log2
def A_ ( a ) -> int:
    if a < 0:
        raise ValueError('Input value must be a positive integer' )
    elif isinstance(a , float ):
        raise TypeError('Input value must be a \'int\' type' )
    # a & -a isolates the lowest set bit; log2 gives its index.
    return 0 if (a == 0) else int(log2(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
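# Added worked examples: `a & -a` isolates the lowest set bit of `a` (two's
# complement), and log2 of that power of two is its zero-based index.
#
# >>> A_(0)
# 0
# >>> A_(8)   # 0b1000 -> lowest set bit at index 3
# 3
# >>> A_(12)  # 0b1100 -> lowest set bit at index 2
# 2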
| 99 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_clipseg"] = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
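# Added note: with the _LazyModule pattern above, submodule imports are deferred
# until first attribute access, so e.g.
#
#   from transformers.models.clipseg import CLIPSegProcessor
#
# stays cheap and torch-free, while the modeling classes are only resolvable
# when torch is installed (otherwise the "modeling_clipseg" key is absent from
# _import_structure).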
| 362 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"
def convert_dialogpt_checkpoint( checkpoint_path , pytorch_dump_folder_path ) -> None:
    """simple docstring"""
    d = torch.load(checkpoint_path )
    d[NEW_KEY] = d.pop(OLD_KEY )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    torch.save(d , os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
        pytorch_dump_folder_path = f"""./DialoGPT-{MODEL}"""
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
| 13 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class DistilBertConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }
    def __init__( self , vocab_size=3_0522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.02 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
class DistilBertOnnxConfig( OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
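# Added usage sketch (illustrative values, not part of the original file): the
# attribute_map above lets the generic names used across the library resolve to
# DistilBERT's own field names.
def _demo_distilbert_config() -> None:
    config = DistilBertConfig(n_layers=3 , n_heads=6 , dim=384 , hidden_dim=4 * 384 )
    assert config.num_hidden_layers == config.n_layers == 3
    assert config.hidden_size == config.dim == 384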
| 39 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class UpperCAmelCase_ :
'''simple docstring'''
UpperCamelCase__ : int = MBartConfig
UpperCamelCase__ : Optional[Any] = {}
UpperCamelCase__ : Union[str, Any] = '''gelu'''
def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=False , _A=99 , _A=32 , _A=2 , _A=4 , _A=37 , _A=0.1 , _A=0.1 , _A=20 , _A=2 , _A=1 , _A=0 , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = eos_token_id
__SCREAMING_SNAKE_CASE = pad_token_id
__SCREAMING_SNAKE_CASE = bos_token_id
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__SCREAMING_SNAKE_CASE = prepare_mbart_inputs_dict(_A , _A , _A )
return config, inputs_dict
def _A ( self , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFMBartModel(config=_A ).get_decoder()
__SCREAMING_SNAKE_CASE = inputs_dict['input_ids']
__SCREAMING_SNAKE_CASE = input_ids[:1, :]
__SCREAMING_SNAKE_CASE = inputs_dict['attention_mask'][:1, :]
__SCREAMING_SNAKE_CASE = inputs_dict['head_mask']
__SCREAMING_SNAKE_CASE = 1
# first forward pass
__SCREAMING_SNAKE_CASE = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple()
__SCREAMING_SNAKE_CASE = past_key_values[1]
def prepare_mbart_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> dict:
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class UpperCAmelCase_ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : Any = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
UpperCamelCase__ : int = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase__ : Optional[Any] = (
{
'''conversational''': TFMBartForConditionalGeneration,
'''feature-extraction''': TFMBartModel,
'''summarization''': TFMBartForConditionalGeneration,
'''text2text-generation''': TFMBartForConditionalGeneration,
'''translation''': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase__ : List[str] = True
UpperCamelCase__ : Tuple = False
UpperCamelCase__ : Union[str, Any] = False
def _A ( self , _A , _A , _A , _A , _A ):
'''simple docstring'''
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFMBartModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_A )
def _A ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_A )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ : Any = [
''' UN Chief Says There Is No Military Solution in Syria''',
]
UpperCamelCase__ : str = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
]
UpperCamelCase__ : List[str] = '''facebook/mbart-large-en-ro'''
@cached_property
def _A ( self ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def _A ( self , **_A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.translate_src_text(**_A )
self.assertListEqual(self.expected_text , _A )
def _A ( self , **_A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , **_A , return_tensors='tf' )
__SCREAMING_SNAKE_CASE = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
__SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(_A , skip_special_tokens=_A )
return generated_words
@slow
def _A ( self ):
'''simple docstring'''
self._assert_generated_batch_equal_expected()
| 257 | 0 |
'''simple docstring'''
import numpy as np
def a__ ( vector : np.array ) -> np.array:
    """simple docstring"""
    return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
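# Added numerical note: for very negative inputs np.exp(-vector) overflows and
# emits a RuntimeWarning; a common stable variant branches on the sign. This
# helper is an illustrative alternative, not part of the original file.
def stable_sigmoid(vector: np.ndarray) -> np.ndarray:
    result = np.empty_like(vector, dtype=np.float64)
    positive = vector >= 0
    result[positive] = 1 / (1 + np.exp(-vector[positive]))
    exp_v = np.exp(vector[~positive])
    result[~positive] = exp_v / (1 + exp_v)
    return result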
| 67 |
'''simple docstring'''
def odd_even_transposition( arr : list ) -> list:
    """simple docstring"""
    arr_size = len(arr )
    for _ in range(arr_size ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                arr[i] , arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
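    # Added property check: after len(arr) alternating odd/even passes the list
    # is sorted; each pass compares only disjoint adjacent pairs, which is why
    # the algorithm parallelizes well.
    assert odd_even_transposition([3, 1, 2]) == [1, 2, 3]
    assert odd_even_transposition([]) == []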
| 67 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor( ProcessorMixin ):
    """simple docstring"""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
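# Added usage sketch (names and assets are illustrative, not from the original
# file): the processor fans text out to the tokenizer and images out to the
# image processor, merging pixel_values into the text encoding when both are
# given.
#
#   processor = AltCLIPProcessor(image_processor=image_processor, tokenizer=tokenizer)
#   inputs = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
#   sorted(inputs.keys())  # ['attention_mask', 'input_ids', 'pixel_values']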
| 118 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ) -> torch.Tensor:
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE = [e.name for e in KarrasDiffusionSchedulers]
__SCREAMING_SNAKE_CASE = 2
@register_to_config
def __init__( self,__lowerCamelCase = 1000,__lowerCamelCase = 0.00085,__lowerCamelCase = 0.012,__lowerCamelCase = "linear",__lowerCamelCase = None,__lowerCamelCase = "epsilon",__lowerCamelCase = False,__lowerCamelCase = False,__lowerCamelCase = 1.0,__lowerCamelCase = "linspace",__lowerCamelCase = 0,):
if trained_betas is not None:
A__ = torch.tensor(__lowerCamelCase,dtype=torch.floataa )
elif beta_schedule == "linear":
A__ = torch.linspace(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
A__ = (
torch.linspace(beta_start**0.5,beta_end**0.5,__lowerCamelCase,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A__ = betas_for_alpha_bar(__lowerCamelCase,alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
A__ = betas_for_alpha_bar(__lowerCamelCase,alpha_transform_type='''exp''' )
else:
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}" )
A__ = 1.0 - self.betas
A__ = torch.cumprod(self.alphas,dim=0 )
# set all values
self.set_timesteps(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase )
A__ = use_karras_sigmas
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=None ):
if schedule_timesteps is None:
A__ = self.timesteps
A__ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
A__ = 1 if len(__lowerCamelCase ) > 1 else 0
else:
A__ = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep
A__ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCamelCase ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,):
A__ = self.index_for_timestep(__lowerCamelCase )
A__ = self.sigmas[step_index]
A__ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase = None,__lowerCamelCase = None,):
A__ = num_inference_steps
A__ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
A__ = np.linspace(0,num_train_timesteps - 1,__lowerCamelCase,dtype=__lowerCamelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
A__ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A__ = (np.arange(0,__lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(__lowerCamelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
A__ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A__ = (np.arange(__lowerCamelCase,0,-step_ratio )).round().copy().astype(__lowerCamelCase )
timesteps -= 1
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
A__ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
A__ = np.log(__lowerCamelCase )
A__ = np.interp(__lowerCamelCase,np.arange(0,len(__lowerCamelCase ) ),__lowerCamelCase )
if self.config.use_karras_sigmas:
A__ = self._convert_to_karras(in_sigmas=__lowerCamelCase,num_inference_steps=self.num_inference_steps )
A__ = np.array([self._sigma_to_t(__lowerCamelCase,__lowerCamelCase ) for sigma in sigmas] )
A__ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
A__ = torch.from_numpy(__lowerCamelCase ).to(device=__lowerCamelCase )
A__ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
A__ = torch.from_numpy(__lowerCamelCase )
A__ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(__lowerCamelCase ).startswith('''mps''' ):
# mps does not support float64
A__ = timesteps.to(__lowerCamelCase,dtype=torch.floataa )
else:
A__ = timesteps.to(device=__lowerCamelCase )
# empty dt and derivative
A__ = None
A__ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
A__ = defaultdict(__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
# get log sigma
A__ = np.log(__lowerCamelCase )
# get distribution
A__ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
A__ = np.cumsum((dists >= 0),axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
A__ = low_idx + 1
A__ = log_sigmas[low_idx]
A__ = log_sigmas[high_idx]
# interpolate sigmas
A__ = (low - log_sigma) / (low - high)
A__ = np.clip(__lowerCamelCase,0,1 )
# transform interpolation to time range
A__ = (1 - w) * low_idx + w * high_idx
A__ = t.reshape(sigma.shape )
return t
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
A__ = in_sigmas[-1].item()
A__ = in_sigmas[0].item()
A__ = 7.0 # 7.0 is the value used in the paper
A__ = np.linspace(0,1,__lowerCamelCase )
A__ = sigma_min ** (1 / rho)
A__ = sigma_max ** (1 / rho)
A__ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def UpperCamelCase ( self ):
return self.dt is None
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase = True,):
A__ = self.index_for_timestep(__lowerCamelCase )
# advance index counter by 1
A__ = timestep.cpu().item() if torch.is_tensor(__lowerCamelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
A__ = self.sigmas[step_index]
A__ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
A__ = self.sigmas[step_index - 1]
A__ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
A__ = 0
A__ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
A__ = sigma_hat if self.state_in_first_order else sigma_next
A__ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
A__ = sigma_hat if self.state_in_first_order else sigma_next
A__ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
A__ = model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
if self.config.clip_sample:
A__ = pred_original_sample.clamp(
-self.config.clip_sample_range,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
A__ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
A__ = sigma_next - sigma_hat
# store for 2nd order step
A__ = derivative
A__ = dt
A__ = sample
else:
# 2. 2nd order / Heun's method
A__ = (sample - pred_original_sample) / sigma_next
A__ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
A__ = self.dt
A__ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
A__ = None
A__ = None
A__ = None
A__ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__lowerCamelCase )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
A__ = self.sigmas.to(device=original_samples.device,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__lowerCamelCase ):
# mps does not support float64
A__ = self.timesteps.to(original_samples.device,dtype=torch.floataa )
A__ = timesteps.to(original_samples.device,dtype=torch.floataa )
else:
A__ = self.timesteps.to(original_samples.device )
A__ = timesteps.to(original_samples.device )
A__ = [self.index_for_timestep(__lowerCamelCase,__lowerCamelCase ) for t in timesteps]
A__ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
A__ = sigma.unsqueeze(-1 )
A__ = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
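# Added denoising-loop sketch (illustrative; assumes the class above matches the
# diffusers HeunDiscreteScheduler interface, and `unet` is a placeholder
# denoiser): each timestep is visited twice by Heun's two-stage update, which is
# why set_timesteps() repeats the interior sigmas and timesteps.
#
#   scheduler = HeunDiscreteScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(25, device="cpu")
#   sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample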
| 193 | 0 |
"""simple docstring"""
def print_pascal_triangle( num_rows : int ) -> None:
    triangle = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1 ):
            print(end=' ' )
        # Print row values
        for col_idx in range(row_idx + 1 ):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx] , end=' ' )
            else:
                print(triangle[row_idx][col_idx] , end='' )
        print()
def generate_pascal_triangle( num_rows : int ) -> list[list[int]]:
    if not isinstance(num_rows , int ):
        raise TypeError('The input value of \'num_rows\' should be \'int\'' )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0' )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows ):
        current_row = populate_current_row(triangle , current_row_idx )
        triangle.append(current_row )
    return triangle
def populate_current_row( triangle : list[list[int]] , current_row_idx : int ) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0] , current_row[-1] = 1, 1
    for current_col_idx in range(1 , current_row_idx ):
        calculate_current_element(
            triangle , current_row , current_row_idx , current_col_idx )
    return current_row
def lowercase__( __SCREAMING_SNAKE_CASE : list[list[int]] , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , ):
lowercase_ : Tuple = triangle[current_row_idx - 1][current_col_idx - 1]
lowercase_ : int = triangle[current_row_idx - 1][current_col_idx]
lowercase_ : Any = above_to_left_elt + above_to_right_elt
def lowercase__( __SCREAMING_SNAKE_CASE : int ):
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise TypeError('The input value of \'num_rows\' should be \'int\'' )
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
'The input value of \'num_rows\' should be greater than or equal to 0' )
lowercase_ : list[list[int]] = [[1]]
for row_index in range(1 , __SCREAMING_SNAKE_CASE ):
lowercase_ : Optional[int] = [0] + result[-1] + [0]
lowercase_ : Dict = row_index + 1
# Calculate the number of distinct elements in a row
lowercase_ : Optional[int] = sum(divmod(__SCREAMING_SNAKE_CASE , 2 ) )
lowercase_ : int = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
]
lowercase_ : List[Any] = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
lowercase_ : List[Any] = row_first_half + row_second_half
result.append(__SCREAMING_SNAKE_CASE )
return result
def lowercase__( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__SCREAMING_SNAKE_CASE : Callable , __SCREAMING_SNAKE_CASE : int ) -> None:
lowercase_ : int = F'''{func.__name__}({value})'''
lowercase_ : List[str] = timeit(F'''__main__.{call}''' , setup='import __main__' )
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'''{call:38} -- {timing:.4f} seconds''' )
for value in range(15 ): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
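# A quick equivalence check between the two generators above; a minimal sketch that
# assumes both functions are importable from the same module.
def _check_generators_agree(max_rows: int = 10) -> None:
    for n in range(max_rows):
        assert generate_pascal_triangle(n) == generate_pascal_triangle_optimized(n), n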
| 321 | """simple docstring"""
from collections import namedtuple
import requests
from lxml import html  # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")
def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))
fmt = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
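# Scraping like this is fragile: it assumes the page keeps exactly three
# "maincounter-number" divs in that order and that the request succeeds. A slightly
# more defensive variant (a sketch, not part of the original script):
def covid_stats_safe(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    values = html.fromstring(response.content).xpath(
        '//div[@class = "maincounter-number"]/span/text()'
    )
    if len(values) != 3:
        raise RuntimeError("Unexpected page structure; expected 3 counters")
    return covid_data(*values)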
| 321 | 1 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def a__ ( SCREAMING_SNAKE_CASE : int = 8 ):
'''simple docstring'''
lowerCAmelCase : List[str] = ascii_letters + digits + punctuation
return "".join(secrets.choice(SCREAMING_SNAKE_CASE ) for _ in range(SCREAMING_SNAKE_CASE ) )
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
i -= len(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Any = i // 3
lowerCAmelCase : List[Any] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
lowerCAmelCase : Union[str, Any] = (
chars_incl
+ random(SCREAMING_SNAKE_CASE , quotient + remainder )
+ random(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
+ random(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
)
lowerCAmelCase : List[Any] = list(SCREAMING_SNAKE_CASE )
shuffle(SCREAMING_SNAKE_CASE )
return "".join(SCREAMING_SNAKE_CASE )
# random is a generalised function for letters, characters and numbers
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return "".join(secrets.choice(SCREAMING_SNAKE_CASE ) for _ in range(SCREAMING_SNAKE_CASE ) )
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
pass # Put your code here...
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
pass # Put your code here...
def a__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
pass # Put your code here...
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int = 8 ):
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE ) < min_length:
# Your Password must be at least 8 characters long
return False
lowerCAmelCase : Optional[Any] = any(char in ascii_uppercase for char in password )
lowerCAmelCase : List[str] = any(char in ascii_lowercase for char in password )
lowerCAmelCase : Optional[int] = any(char in digits for char in password )
lowerCAmelCase : Union[str, Any] = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : int = int(input("Please indicate the max length of your password: " ).strip() )
lowerCAmelCase : int = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(SCREAMING_SNAKE_CASE ) )
print(
"Alternative Password generated:" , alternative_password_generator(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
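# A minimal usage sketch for the two public helpers above (hypothetical, not part of
# the original script); output varies per run because generation is randomized.
def demo() -> None:
    pw = password_generator(12)
    print(pw, "->", "strong" if is_strong_password(pw) else "weak")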
| 108 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
while b:
lowerCAmelCase , lowerCAmelCase : Any = b, a % b
return a
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return a if b == 0 else euclidean_gcd_recursive(SCREAMING_SNAKE_CASE , a % b )
def a__ ( ):
'''simple docstring'''
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
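# A quick property check against the standard library; a sketch assuming the two
# functions above are in scope.
import math
def _check_gcd(limit: int = 20) -> None:
    for a in range(1, limit):
        for b in range(limit):
            assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)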
| 108 | 1 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPSegForImageSegmentation,
    CLIPSegProcessor,
    CLIPTextModel,
    CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    """Pipeline that segments an image with CLIPSeg from a text prompt, then inpaints the segmented region."""
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)
        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)
        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 .")
        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device("cuda")
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # 1. segment the image with CLIPSeg using the `text` prompt
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt").to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)
        # 2. Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
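# A hedged usage sketch for the pipeline above, kept commented out because it downloads
# weights. The checkpoint ids ("CIDAS/clipseg-rd64-refined", "runwayml/stable-diffusion-inpainting"),
# the custom_pipeline name, and the input image path are assumptions for illustration;
# substitute whatever segmentation/inpainting weights you actually use.
#
# from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
# from diffusers import DiffusionPipeline
# from PIL import Image
#
# processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
# pipe = DiffusionPipeline.from_pretrained(
#     "runwayml/stable-diffusion-inpainting",
#     custom_pipeline="text_inpainting",
#     segmentation_model=model,
#     segmentation_processor=processor,
# )
# image = Image.open("input.png").convert("RGB").resize((512, 512))
# result = pipe(image=image, text="a couch", prompt="a red leather couch").images[0]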
| 371 |
def solution(length: int = 50) -> int:
    """Count the ways blocks of length >= 3 can be placed on a row of the given length."""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
    print(f"{solution() = }")
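# Sanity check, a sketch: this is the Project Euler 114 counting problem, for which the
# known small values are f(3) = 2 and f(7) = 17.
def _sanity_check() -> None:
    assert solution(3) == 2
    assert solution(7) == 17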
| 315 | 0 |
"""simple docstring"""
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase = False ) -> bool:
"""simple docstring"""
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3317044064679887385961981 and not allow_probable:
raise ValueError(
"Warning: upper bound of deterministic test is exceeded. "
"Pass allow_probable=True to allow probabilistic test. "
"A return value of True indicates a probable prime." )
# array bounds provided by analysis
lowerCAmelCase_ : Optional[int] = [
2047,
1373653,
25326001,
3215031751,
2152302898747,
3474749660383,
341550071728321,
1,
3825123056546413051,
1,
1,
318665857834031151167461,
3317044064679887385961981,
]
lowerCAmelCase_ : Union[str, Any] = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(__A , 1 ):
if n < _p:
# then we have our last prime to check
lowerCAmelCase_ : int = primes[:idx]
break
lowerCAmelCase_ , lowerCAmelCase_ : Any = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
lowerCAmelCase_ : Any = False
for r in range(__A ):
lowerCAmelCase_ : Optional[Any] = pow(__A , d * 2**r , __A )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowerCAmelCase_ : Optional[int] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def __lowerCamelCase ( ) -> None:
"""simple docstring"""
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 241 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads
    @property
    def rotary(self):
        return not self.alibi
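# A tiny usage sketch (commented out, since this module uses package-relative imports):
# with the defaults above, the derived properties work out as follows.
# config = FalconConfig()
# config.head_dim  # 4544 // 71 == 64
# config.rotary    # True, since alibi defaults to False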
| 51 | 0 |
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_DESCRIPTION = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )
    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared)
        return {"mse": mse}
| 371 |
import math
def solution(n: int = 100) -> int:
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
    print(f"{solution() = }")
| 26 | 0 |
def bfs(graph, source, sink, parent):
    """Return True if the sink is reachable in the residual graph; fill `parent` along the way."""
    visited = [False] * len(graph)
    queue = [source]
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]
def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and used to recover the augmenting path
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
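# For this classic capacity matrix (the CLRS flow-network example) the maximum flow is 23,
# so the print above should output 23. A minimal check, using a fresh copy of the matrix
# because ford_fulkerson mutates its argument in place:
_graph_copy = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert ford_fulkerson(_graph_copy, 0, 5) == 23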
| 303 | import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """Miller-Rabin probabilistic primality test with `prec` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
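# A cross-check against naive trial division for small inputs; a sketch that only runs
# where the project's `bin_exp_mod` helper import resolves.
def _check_against_trial_division(limit: int = 200) -> None:
    def is_prime_naive(k: int) -> bool:
        return k >= 2 and all(k % i for i in range(2, int(k**0.5) + 1))
    for k in range(limit):
        assert is_prime_big(k) == is_prime_naive(k), k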
| 143 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 2 | """simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def _snake_case ( self ):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def _snake_case ( self ):
pass
def _snake_case ( self ):
# fmt: off
lowercase__: Dict = '''I was born in 92000, and this is falsé.'''
lowercase__: str = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Tuple = DebertaVaTokenizerFast(_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Any = '''I was born in 92000, and this is falsé.'''
lowercase__: str = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[int] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: List[str] = '''I was born in 92000, and this is falsé.'''
lowercase__: List[str] = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Union[str, Any] = '''I was born in 92000, and this is falsé.'''
lowercase__: int = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Optional[int] = ''' \tHeLLo!how \n Are yoU? '''
lowercase__: str = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
lowercase__: Dict = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: int = self.get_tokenizer()
lowercase__: List[Any] = self.get_rust_tokenizer()
lowercase__: List[str] = '''I was born in 92000, and this is falsé.'''
lowercase__: Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = self.get_rust_tokenizer()
lowercase__: str = tokenizer.encode(_UpperCAmelCase )
lowercase__: Any = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[Any] = '''This is a test'''
lowercase__: str = [13, 1, 4398, 25, 21, 1289]
lowercase__: List[Any] = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__: Any = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__: int = DebertaVaTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
lowercase__: Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: str = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: str = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# fmt: off
lowercase__: str = '''I was born in 92000, and this is falsé.'''
lowercase__: Dict = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
lowercase__: Tuple = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
lowercase__: Dict = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__: Optional[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase )
lowercase__: Optional[int] = tokenizer.encode('''sequence builders''' )
lowercase__: Optional[Any] = tokenizer.encode('''multi-sequence build''' )
lowercase__: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
lowercase__: Dict = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _UpperCAmelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _UpperCAmelCase , )
@slow
def _snake_case ( self ):
# fmt: off
lowercase__: List[Any] = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 2 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Any = "ctrl"
UpperCAmelCase_ :str = ["past_key_values"]
UpperCAmelCase_ :Any = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , __A=24_6534 , __A=256 , __A=1280 , __A=8192 , __A=48 , __A=16 , __A=0.1 , __A=0.1 , __A=1E-6 , __A=0.0_2 , __A=True , **__A , ) -> str:
lowerCAmelCase_ :Optional[Any] = vocab_size
lowerCAmelCase_ :str = n_positions
lowerCAmelCase_ :int = n_embd
lowerCAmelCase_ :int = n_layer
lowerCAmelCase_ :Any = n_head
lowerCAmelCase_ :Any = dff
lowerCAmelCase_ :Optional[int] = resid_pdrop
lowerCAmelCase_ :Optional[int] = embd_pdrop
lowerCAmelCase_ :Any = layer_norm_epsilon
lowerCAmelCase_ :Any = initializer_range
lowerCAmelCase_ :Optional[Any] = use_cache
super().__init__(**__A )
| 84 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
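# A quick illustration of what `_re_checkpoint` extracts from a config docstring; the
# sample string is hypothetical but matches the documented pattern.
_sample = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
assert _re_checkpoint.findall(_sample) == [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]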
| 52 | 0 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio)
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_1 = outputs[0].cpu().numpy()
            out_1[np.isnan(out_1)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_2 = after_outputs[0].cpu().numpy()
                out_2[np.isnan(out_2)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load"
        " to get deterministic results.")
    def test_determinism(self):
        pass
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load"
        " to get deterministic results.")
    def test_save_load_fast_init_from_base(self):
        pass
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load"
        " to get deterministic results.")
    def test_save_load_fast_init_to_base(self):
        pass
    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
    @slow
    def test_inference_for_pretraining(self):
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 361 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor ( ProcessorMixin ):
attributes = ["image_processor", "tokenizer"]
image_processor_class = "LayoutLMv3ImageProcessor"
tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> None:
"""simple docstring"""
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , FutureWarning , )
feature_extractor = kwargs.pop('feature_extractor' )
image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(image_processor , tokenizer )
def __call__( self , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes : Union[List[List[int]], List[List[List[int]]]] = None , word_labels : Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
# first, apply the image processor
features = self.image_processor(images=images , return_tensors=return_tensors )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(text , str ):
text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
text_pair = features['words']
encoded_inputs = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
# add pixel values
images = features.pop('pixel_values' )
if return_overflowing_tokens is True:
images = self.get_overflowing_images(images , encoded_inputs['overflow_to_sample_mapping'] )
encoded_inputs['pixel_values'] = images
return encoded_inputs
def get_overflowing_images( self , images , overflow_to_sample_mapping ) -> Union[str, Any]:
"""simple docstring"""
images_with_overflow = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
F""" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}""" )
return images_with_overflow
def batch_decode( self , *args , **kwargs ) -> List[str]:
"""simple docstring"""
return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self , *args , **kwargs ) -> Any:
"""simple docstring"""
return self.tokenizer.decode(*args , **kwargs )
@property
def model_input_names( self ) -> List[str]:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def feature_extractor_class( self ) -> Any:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
return self.image_processor_class
@property
def feature_extractor( self ) -> Any:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
return self.image_processor
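# --- Illustrative addition (not part of the original processor file) ---
# Hedged usage sketch: with the default `apply_ocr=True`, the image processor extracts
# the words and boxes itself (pytesseract must be installed), so only an image is needed.
# The checkpoint name is the standard one; the image path is a placeholder.
from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
image = Image.open("document.png").convert("RGB")
encoding = processor(image, return_tensors="pt")
print(sorted(encoding.keys()))  # expect: attention_mask, bbox, input_ids, pixel_values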
| 88 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester ( unittest.TestCase ):
'''simple docstring'''
def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> Any:
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_choices = num_choices
def prepare_config_and_inputs(self ) -> Tuple:
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
attention_mask = None
if self.use_attention_mask:
attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
config = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def prepare_config_and_inputs_for_common(self ) -> Optional[int]:
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , attention_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def prepare_config_and_inputs_for_decoder(self ) -> Optional[int]:
config_and_inputs = self.prepare_config_and_inputs()
config , input_ids , token_type_ids , attention_mask = config_and_inputs
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxRobertaModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
test_head_masking = True
all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def setUp(self ) -> None:
self.model_tester = FlaxRobertaModelTester(self )
@slow
def test_model_from_pretrained(self ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("roberta-base" , from_pt=True )
outputs = model(np.ones((1, 1) ) )
self.assertIsNotNone(outputs )
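# --- Illustrative addition (not part of the original test file) ---
# What `from_pt=True` in the slow test above does, in isolation: it converts a PyTorch
# checkpoint into Flax parameters on the fly. Public transformers API; the tiny input
# below is illustrative.
import numpy as np
from transformers import FlaxRobertaModel

model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
outputs = model(np.ones((1, 8), dtype="i4"))
print(outputs.last_hidden_state.shape)  # (1, 8, 768) for roberta-base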
| 298 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 338 | 0 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class AlignTextConfig (PretrainedConfig ):
model_type = '''align_text_model'''
def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ) -> None:
super().__init__(**kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.pad_token_id = pad_token_id
@classmethod
def __a ( cls , _a , **_a ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_a )
lowerCAmelCase_ , lowerCAmelCase_ = cls.get_config_dict(_a , **_a )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
lowerCAmelCase_ = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_a , **_a )
class AlignVisionConfig (PretrainedConfig ):
model_type = '''align_vision_model'''
def __init__( self , num_channels = 3 , image_size = 600 , width_coefficient = 2.0 , depth_coefficient = 3.1 , depth_divisor = 8 , kernel_sizes = [3, 3, 5, 3, 5, 5, 3] , in_channels = [32, 16, 24, 40, 80, 112, 192] , out_channels = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding = [] , strides = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats = [1, 2, 2, 3, 3, 4, 1] , expand_ratios = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio = 0.25 , hidden_act = "swish" , hidden_dim = 2560 , pooling_type = "mean" , initializer_range = 0.02 , batch_norm_eps = 0.001 , batch_norm_momentum = 0.99 , drop_connect_rate = 0.2 , **kwargs , ) -> None:
super().__init__(**kwargs )
self.num_channels = num_channels
self.image_size = image_size
self.width_coefficient = width_coefficient
self.depth_coefficient = depth_coefficient
self.depth_divisor = depth_divisor
self.kernel_sizes = kernel_sizes
self.in_channels = in_channels
self.out_channels = out_channels
self.depthwise_padding = depthwise_padding
self.strides = strides
self.num_block_repeats = num_block_repeats
self.expand_ratios = expand_ratios
self.squeeze_expansion_ratio = squeeze_expansion_ratio
self.hidden_act = hidden_act
self.hidden_dim = hidden_dim
self.pooling_type = pooling_type
self.initializer_range = initializer_range
self.batch_norm_eps = batch_norm_eps
self.batch_norm_momentum = batch_norm_momentum
self.drop_connect_rate = drop_connect_rate
self.num_hidden_layers = sum(num_block_repeats ) * 4
@classmethod
def __a ( cls , _a , **_a ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_a )
lowerCAmelCase_ , lowerCAmelCase_ = cls.get_config_dict(_a , **_a )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
lowerCAmelCase_ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_a , **_a )
class AlignConfig (PretrainedConfig ):
model_type = '''align'''
is_composition = True
def __init__( self , text_config=None , vision_config=None , projection_dim=640 , temperature_init_value=1.0 , initializer_range=0.02 , **kwargs , ) -> None:
super().__init__(**kwargs )
if text_config is None:
text_config = {}
logger.info("text_config is None. Initializing the AlignTextConfig with default values." )
if vision_config is None:
vision_config = {}
logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." )
self.text_config = AlignTextConfig(**text_config )
self.vision_config = AlignVisionConfig(**vision_config )
self.projection_dim = projection_dim
self.temperature_init_value = temperature_init_value
self.initializer_range = initializer_range
@classmethod
def from_text_vision_configs( cls , text_config , vision_config , **kwargs ) -> "AlignConfig":
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
def to_dict( self ) -> Union[str, Any]:
output = copy.deepcopy(self.__dict__ )
output["text_config"] = self.text_config.to_dict()
output["vision_config"] = self.vision_config.to_dict()
output["model_type"] = self.__class__.model_type
return output
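# --- Illustrative addition (not part of the original config file) ---
# Hedged sketch of composing the three configs above via the public transformers API
# (equivalent to the classes defined in this file); values shown are the defaults.
from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig

text_config = AlignTextConfig(hidden_size=768)
vision_config = AlignVisionConfig(image_size=600)
config = AlignConfig.from_text_vision_configs(text_config, vision_config, projection_dim=640)
print(config.model_type)  # "align"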
| 366 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg , hint=None ):
require_version(deps[pkg] , hint )
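# --- Illustrative addition (not part of the original module) ---
# How the runtime checks above behave, sketched with the same helper; the pinned
# versions are illustrative. Unmet requirements raise ImportError with the hint appended.
from transformers.utils.versions import require_version

require_version("numpy>=1.17")  # passes silently when the installed version satisfies it
try:
    require_version("numpy>=999.0", "pip install -U numpy")
except ImportError as err:
    print(err)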
| 22 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast ( PreTrainedTokenizerFast ):
"""simple docstring"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = RealmTokenizer
def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
):
normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
normalizer_state['lowercase'] = do_lower_case
normalizer_state['strip_accents'] = strip_accents
normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
self.do_lower_case = do_lower_case
def batch_encode_candidates( self , text , **kwargs ):
kwargs['padding'] = PaddingStrategy.MAX_LENGTH
batch_text = text
batch_text_pair = kwargs.pop('text_pair' , None )
return_tensors = kwargs.pop('return_tensors' , None )
output_data = {
"input_ids": [],
"attention_mask": [],
"token_type_ids": [],
}
for idx, candidate_text in enumerate(batch_text ):
if batch_text_pair is not None:
candidate_text_pair = batch_text_pair[idx]
else:
candidate_text_pair = None
encoded_candidates = super().__call__(candidate_text , candidate_text_pair , return_tensors=None , **kwargs )
encoded_input_ids = encoded_candidates.get('input_ids' )
encoded_attention_mask = encoded_candidates.get('attention_mask' )
encoded_token_type_ids = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(encoded_input_ids )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(encoded_attention_mask )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(encoded_token_type_ids )
output_data = {key: item for key, item in output_data.items() if len(item ) != 0}
return BatchEncoding(output_data , tensor_type=return_tensors )
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def save_vocabulary( self , save_directory , filename_prefix = None ):
files = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files )
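# --- Illustrative addition (not part of the original tokenizer file) ---
# Hedged sketch of `batch_encode_candidates` above: padding is forced to MAX_LENGTH so
# the per-question candidate lists stack into one rectangular tensor. The candidate
# texts are illustrative; the checkpoint name comes from the maps above.
from transformers import RealmTokenizerFast

tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
candidates = [["Paris is in France.", "Berlin is in Germany."]]
batch = tokenizer.batch_encode_candidates(candidates, max_length=16, return_tensors="pt")
print(batch["input_ids"].shape)  # (1 question, 2 candidates, 16 tokens)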
| 53 |
def combination_util( arr , n , r , index , data , i ) -> None:
if index == r:
for j in range(r ):
print(data[j] , end=" " )
print(" " )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
data[index] = arr[i]
combination_util(arr , n , r , index + 1 , data , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(arr , n , r , index , data , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination( arr , n , r ) -> None:
# A temporary array to store all combination one by one
data = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(arr , n , r , 0 , data , 0 )
if __name__ == "__main__":
# Driver code to check the function above
arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
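# --- Illustrative addition (not part of the original file) ---
# Cross-check for the recursive routine above: itertools.combinations yields the same
# r-sized subsets in the same lexicographic-by-index order.
from itertools import combinations

for combo in combinations([10, 20, 30, 40, 50], 3):
    print(*combo)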
| 20 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _a ( unittest.TestCase ):
def test_download_only_pytorch( self ) -> None:
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
_ = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" ,safety_checker=None ,cache_dir=tmpdirname )
all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname ,os.listdir(tmpdirname )[0] ,"snapshots" ) )]
files = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class _a ( unittest.TestCase ):
def _lowercase ( self ) -> List[str]:
_snake_case , _snake_case = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" ,safety_checker=_SCREAMING_SNAKE_CASE )
_snake_case = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
_snake_case = jax.random.PRNGKey(0 )
_snake_case = 4
_snake_case = jax.device_count()
_snake_case = num_samples * [prompt]
_snake_case = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_snake_case = replicate(_SCREAMING_SNAKE_CASE )
_snake_case = jax.random.split(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = shard(_SCREAMING_SNAKE_CASE )
_snake_case = pipeline(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
assert np.abs(np.abs(_SCREAMING_SNAKE_CASE ,dtype=np.floataa ).sum() - 49_947.875 ) < 5e-1
_snake_case = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_SCREAMING_SNAKE_CASE ) == num_samples
def _lowercase ( self ) -> Any:
_snake_case , _snake_case = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" ,revision="flax" ,safety_checker=_SCREAMING_SNAKE_CASE )
_snake_case = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
_snake_case = jax.random.PRNGKey(0 )
_snake_case = 50
_snake_case = jax.device_count()
_snake_case = num_samples * [prompt]
_snake_case = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_snake_case = replicate(_SCREAMING_SNAKE_CASE )
_snake_case = jax.random.split(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = shard(_SCREAMING_SNAKE_CASE )
_snake_case = pipeline(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE ,dtype=np.floataa ).sum() - 2_383_808.2) ) < 5e-1
def _lowercase ( self ) -> str:
_snake_case , _snake_case = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" ,revision="bf16" ,dtype=jnp.bfloataa ,safety_checker=_SCREAMING_SNAKE_CASE )
_snake_case = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
_snake_case = jax.random.PRNGKey(0 )
_snake_case = 50
_snake_case = jax.device_count()
_snake_case = num_samples * [prompt]
_snake_case = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_snake_case = replicate(_SCREAMING_SNAKE_CASE )
_snake_case = jax.random.split(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = shard(_SCREAMING_SNAKE_CASE )
_snake_case = pipeline(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE ,dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def _lowercase ( self ) -> Dict:
_snake_case , _snake_case = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" ,revision="bf16" ,dtype=jnp.bfloataa )
_snake_case = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
_snake_case = jax.random.PRNGKey(0 )
_snake_case = 50
_snake_case = jax.device_count()
_snake_case = num_samples * [prompt]
_snake_case = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_snake_case = replicate(_SCREAMING_SNAKE_CASE )
_snake_case = jax.random.split(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = shard(_SCREAMING_SNAKE_CASE )
_snake_case = pipeline(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE ,dtype=np.floataa ).sum() - 2_373_516.75) ) < 5e-1
def _lowercase ( self ) -> Optional[Any]:
_snake_case = FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule="scaled_linear" ,set_alpha_to_one=_SCREAMING_SNAKE_CASE ,steps_offset=1 ,)
_snake_case , _snake_case = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" ,revision="bf16" ,dtype=jnp.bfloataa ,scheduler=_SCREAMING_SNAKE_CASE ,safety_checker=_SCREAMING_SNAKE_CASE ,)
_snake_case = scheduler.create_state()
_snake_case = scheduler_state
_snake_case = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
_snake_case = jax.random.PRNGKey(0 )
_snake_case = 50
_snake_case = jax.device_count()
_snake_case = num_samples * [prompt]
_snake_case = pipeline.prepare_inputs(_SCREAMING_SNAKE_CASE )
# shard inputs and rng
_snake_case = replicate(_SCREAMING_SNAKE_CASE )
_snake_case = jax.random.split(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = shard(_SCREAMING_SNAKE_CASE )
_snake_case = pipeline(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,jit=_SCREAMING_SNAKE_CASE ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3
assert np.abs((np.abs(_SCREAMING_SNAKE_CASE ,dtype=np.floataa ).sum() - 2_347_693.5) ) < 5e-1
def test_jax_memory_efficient_attention( self ) -> None:
prompt = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
num_samples = jax.device_count()
prompt = num_samples * [prompt]
prng_seed = jax.random.split(jax.random.PRNGKey(0 ) ,num_samples )
pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" ,revision="bf16" ,dtype=jnp.bfloat16 ,safety_checker=None ,)
params = replicate(params )
prompt_ids = pipeline.prepare_inputs(prompt )
prompt_ids = shard(prompt_ids )
images = pipeline(prompt_ids ,params ,prng_seed ,jit=True ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
slice = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
pipeline , params = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" ,revision="bf16" ,dtype=jnp.bfloat16 ,safety_checker=None ,use_memory_efficient_attention=True ,)
params = replicate(params )
prompt_ids = pipeline.prepare_inputs(prompt )
prompt_ids = shard(prompt_ids )
images_eff = pipeline(prompt_ids ,params ,prng_seed ,jit=True ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
slice_eff = images_eff[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 351 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar('''T''')
U = TypeVar('''U''')
class DoubleLinkedListNode ( Generic[T, U] ):
def __init__( self , key , val ) -> None:
self.key = key
self.val = val
self.next = None
self.prev = None
def __repr__( self ) -> str:
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class DoubleLinkedList ( Generic[T, U] ):
def __init__( self ) -> None:
self.head = DoubleLinkedListNode(None , None )
self.rear = DoubleLinkedListNode(None , None )
self.head.next , self.rear.prev = self.rear , self.head
def __repr__( self ) -> str:
_snake_case = ["DoubleLinkedList"]
_snake_case = self.head
while node.next is not None:
rep.append(str(_SCREAMING_SNAKE_CASE ) )
_snake_case = node.next
rep.append(str(self.rear ) )
return ",\n ".join(_SCREAMING_SNAKE_CASE )
def add( self , node ) -> None:
previous = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
previous.next = node
node.prev = previous
node.next = self.rear
self.rear.prev = node
def remove( self , node ) -> DoubleLinkedListNode[T, U] | None:
if node.prev is None or node.next is None:
return None
node.prev.next = node.next
node.next.prev = node.prev
node.prev = None
node.next = None
return node
class LRUCache ( Generic[T, U] ):
decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self , capacity ) -> None:
self.list = DoubleLinkedList()
self.capacity = capacity
self.num_keys = 0
self.hits = 0
self.miss = 0
self.cache = {}
def __repr__( self ) -> str:
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self ,_SCREAMING_SNAKE_CASE ) -> bool:
return key in self.cache
def get( self , key ) -> U | None:
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
value_node = self.cache[key]
node = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(node )
return node.val
self.miss += 1
return None
def put( self , key , value ) -> None:
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
first_node = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(first_node ) is not None
) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
self.cache[key] = DoubleLinkedListNode(key , value )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
node = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
node.val = value
self.list.add(node )
@classmethod
def decorator( cls , size = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
def cache_decorator_inner(func ) -> Callable[..., U]:
def cache_decorator_wrapper(*args ) -> U:
if func not in cls.decorator_function_to_instance_map:
cls.decorator_function_to_instance_map[func] = LRUCache(size )
result = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
result = func(*args )
cls.decorator_function_to_instance_map[func].put(args[0] , result )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(cache_decorator_wrapper , "cache_info" , cache_info )  # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
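# --- Illustrative addition (not part of the original file) ---
# Usage sketch for the LRUCache.decorator classmethod above: it memoizes a function of
# one positional argument and exposes the cache statistics via the attached cache_info().
@LRUCache.decorator(100)
def fib(num: int) -> int:
    return num if num < 2 else fib(num - 1) + fib(num - 2)

print(fib(20))           # 6765, computed with memoized recursion
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)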
| 142 | 0 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ) -> None:
    """simple docstring"""
for attribute in key.split('.' ):
hf_pointer = getattr(hf_pointer , attribute )
if weight_type is not None:
hf_shape = getattr(hf_pointer , weight_type ).shape
else:
hf_shape = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
hf_pointer.weight.data = value
elif weight_type == "weight_g":
hf_pointer.weight_g.data = value
elif weight_type == "weight_v":
hf_pointer.weight_v.data = value
elif weight_type == "bias":
hf_pointer.bias.data = value
else:
hf_pointer.data = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights( fairseq_model , hf_model ) -> None:
"""simple docstring"""
unused_weights = []
fairseq_dict = fairseq_model.state_dict()
feature_extractor = hf_model.feature_extractor
for name, value in fairseq_dict.items():
is_used = False
if "conv_layers" in name:
load_conv_layer(
name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
is_used = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
is_used = True
if "*" in mapped_key:
layer_index = name.split(key )[0].split('.' )[-2]
mapped_key = mapped_key.replace('*' , layer_index )
if "weight_g" in name:
weight_type = 'weight_g'
elif "weight_v" in name:
weight_type = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
weight_type = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
weight_type = 'weight'
else:
weight_type = None
set_recursively(hf_model , mapped_key , value , name , weight_type )
continue
if not is_used:
unused_weights.append(name )
logger.warning(F"""Unused weights: {unused_weights}""" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ) -> None:
"""simple docstring"""
name = full_name.split('conv_layers.' )[-1]
items = name.split('.' )
layer_id = int(items[0] )
type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(full_name )
@torch.no_grad()
def convert_wavlm_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None ) -> None:
"""simple docstring"""
checkpoint = torch.load(checkpoint_path )
cfg = WavLMConfigOrig(checkpoint['cfg'] )
model = WavLMOrig(cfg )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
config = WavLMConfig.from_pretrained(config_path )
else:
config = WavLMConfig()
hf_wavlm = WavLMModel(config )
recursively_load_weights(model , hf_wavlm )
hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
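# --- Illustrative addition (not part of the original script) ---
# Example invocation (the script name and paths below are assumptions, shown only
# for orientation):
#
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted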
| 90 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class MarkupLMConfig ( PretrainedConfig ):
"""simple docstring"""
model_type = """markuplm"""
def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=256 , max_xpath_subs_unit_embeddings=1_024 , tag_pad_id=216 , subs_pad_id=1_001 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
# additional properties
self.max_depth = max_depth
self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
self.tag_pad_id = tag_pad_id
self.subs_pad_id = subs_pad_id
self.xpath_unit_hidden_size = xpath_unit_hidden_size
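# --- Illustrative addition (not part of the original config file) ---
# Quick sketch of the config above: the xpath-specific sizes sit alongside the usual
# BERT-style fields. Values printed are the defaults from the signature.
from transformers import MarkupLMConfig

config = MarkupLMConfig()
print(config.max_depth, config.xpath_unit_hidden_size)  # 50 32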
| 215 | 0 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
tokenizer_class = LxmertTokenizer
rust_tokenizer_class = LxmertTokenizerFast
test_rust_tokenizer = True
space_between_special_tokens = True
def setUp( self ) -> None:
'''simple docstring'''
super().setUp()
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def get_input_output_texts( self , tokenizer ) -> Optional[Any]:
'''simple docstring'''
input_text = "UNwant\u00E9d,running"
output_text = "unwanted, running"
return input_text, output_text
def test_full_tokenizer( self ) -> None:
'''simple docstring'''
tokenizer = self.tokenizer_class(self.vocab_file )
tokens = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(tokens , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
def test_rust_and_python_full_tokenizers( self ) -> None:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "I was born in 92000, and this is falsé."
tokens = tokenizer.tokenize(sequence )
rust_tokens = rust_tokenizer.tokenize(sequence )
self.assertListEqual(tokens , rust_tokens )
ids = tokenizer.encode(sequence , add_special_tokens=False )
rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
self.assertListEqual(ids , rust_ids )
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence )
rust_ids = rust_tokenizer.encode(sequence )
self.assertListEqual(ids , rust_ids )
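# --- Illustrative addition (not part of the original test file) ---
# Minimal illustration of the WordPiece behavior the tests above assert, reusing a toy
# vocabulary like the one written in setUp; the vocab path is a placeholder.
from transformers import LxmertTokenizer

tokenizer = LxmertTokenizer("vocab.txt")
print(tokenizer.tokenize("UNwant\u00E9d,running"))
# expected with the fixture vocab: ['un', '##want', '##ed', ',', 'runn', '##ing']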
| 364 |
import math
import sys
def minimum_squares_to_represent_a_number( number ) -> int:
if number != int(number):
raise ValueError("the value of input must be a natural number")
if number < 0:
raise ValueError("the value of input must not be a negative number")
if number == 0:
return 1
answers = [-1] * (number + 1)
answers[0] = 0
for i in range(1 , number + 1):
answer = sys.maxsize
root = int(math.sqrt(i))
for j in range(1 , root + 1):
current_answer = 1 + answers[i - (j**2)]
answer = min(answer , current_answer)
answers[i] = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
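# --- Illustrative addition (not part of the original file) ---
# Worked examples for the DP above: 12 = 4 + 4 + 4 needs three squares, 13 = 9 + 4
# needs two (Lagrange's four-square theorem caps the answer at four).
assert minimum_squares_to_represent_a_number(12) == 3
assert minimum_squares_to_represent_a_number(13) == 2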
| 180 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNet2DConditionModel,
UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
pipeline_class = UnCLIPImageVariationPipeline
params = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
batch_params = IMAGE_VARIATION_BATCH_PARAMS
required_optional_params = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
test_xformers_attention = False
@property
def text_embedder_hidden_size( self ):
return 32
@property
def time_input_dim( self ):
return 32
@property
def block_out_channels_a( self ):
return self.time_input_dim
@property
def time_embed_dim( self ):
return self.time_input_dim * 4
@property
def cross_attention_dim( self ):
return 100
@property
def dummy_tokenizer( self ):
tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def dummy_text_encoder( self ):
torch.manual_seed(0 )
config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(config )
@property
def dummy_image_encoder( self ):
torch.manual_seed(0 )
config = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(config )
@property
def dummy_text_proj( self ):
torch.manual_seed(0 )
model_kwargs = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
model = UnCLIPTextProjModel(**model_kwargs )
return model
@property
def dummy_decoder( self ):
torch.manual_seed(0 )
model_kwargs = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
model = UNet2DConditionModel(**model_kwargs )
return model
@property
def dummy_super_res_kwargs( self ):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def dummy_super_res_first( self ):
torch.manual_seed(0 )
model = UNet2DModel(**self.dummy_super_res_kwargs )
return model
@property
def dummy_super_res_last( self ):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
model = UNet2DModel(**self.dummy_super_res_kwargs )
return model
def get_dummy_components( self ):
decoder = self.dummy_decoder
text_proj = self.dummy_text_proj
text_encoder = self.dummy_text_encoder
tokenizer = self.dummy_tokenizer
super_res_first = self.dummy_super_res_first
super_res_last = self.dummy_super_res_last
decoder_scheduler = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=10_00 , )
super_res_scheduler = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=10_00 , )
feature_extractor = CLIPImageProcessor(crop_size=32 , size=32 )
image_encoder = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def get_dummy_inputs( self , device , seed=0 , pil_image=True ):
input_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
if str(device ).startswith('mps' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
if pil_image:
input_image = input_image * 0.5 + 0.5
input_image = input_image.clamp(0 , 1 )
input_image = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
input_image = DiffusionPipeline.numpy_to_pil(input_image )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = 'cpu'
__lowerCAmelCase : Optional[int] = self.get_dummy_components()
__lowerCAmelCase : Tuple = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = pipe(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = output.images
__lowerCAmelCase : Optional[Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pipe(
**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
__lowerCAmelCase : Any = image[0, -3:, -3:, -1]
__lowerCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase : Any = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = 'cpu'
__lowerCAmelCase : Any = self.get_dummy_components()
__lowerCAmelCase : Tuple = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = pipe(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = output.images
__lowerCAmelCase : Any = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = pipe(
**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
__lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
__lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase : Union[str, Any] = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
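    # Feeds a list of two images to exercise the batched-input path; the output batch dimension becomes 2.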
    def test_unclip_image_variation_input_list_images( self ):
__lowerCAmelCase : Optional[Any] = 'cpu'
__lowerCAmelCase : Optional[int] = self.get_dummy_components()
__lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
__lowerCAmelCase : Dict = pipe(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = output.images
__lowerCAmelCase : List[Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
__lowerCAmelCase : List[Any] = pipe(
**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
__lowerCAmelCase : Any = image[0, -3:, -3:, -1]
__lowerCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
__lowerCAmelCase : Union[str, Any] = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
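    # Verifies that passing a precomputed image embedding (plus fixed decoder/super-res latents) reproduces the image-input result exactly.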
    def test_unclip_passed_image_embed( self ):
__lowerCAmelCase : Union[str, Any] = torch.device('cpu' )
        class DummyScheduler:
            init_noise_sigma = 1
__lowerCAmelCase : Any = self.get_dummy_components()
__lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
__lowerCAmelCase : Any = pipe.decoder.dtype
__lowerCAmelCase : Union[str, Any] = 1
__lowerCAmelCase : Dict = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
__lowerCAmelCase : List[Any] = pipe.prepare_latents(
_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , scheduler=DummyScheduler() )
__lowerCAmelCase : Union[str, Any] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
__lowerCAmelCase : int = pipe.prepare_latents(
_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , latents=_SCREAMING_SNAKE_CASE , scheduler=DummyScheduler() )
__lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
        img_out_1 = pipe(
            **_SCREAMING_SNAKE_CASE , decoder_latents=_SCREAMING_SNAKE_CASE , super_res_latents=_SCREAMING_SNAKE_CASE ).images
        __lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE , pil_image=_SCREAMING_SNAKE_CASE )
        # Don't pass image, instead pass embedding
        __lowerCAmelCase : List[str] = pipeline_inputs.pop('image' )
        __lowerCAmelCase : Optional[Any] = pipe.image_encoder(_SCREAMING_SNAKE_CASE ).image_embeds
        img_out_2 = pipe(
            **_SCREAMING_SNAKE_CASE , decoder_latents=_SCREAMING_SNAKE_CASE , super_res_latents=_SCREAMING_SNAKE_CASE , image_embeddings=_SCREAMING_SNAKE_CASE , ).images
        # make sure passing image embeddings manually gives an identical result
        assert np.abs(img_out_1 - img_out_2 ).max() < 1E-4
@skip_mps
    def test_attention_slicing_forward_pass( self ):
__lowerCAmelCase : int = torch_device == 'cpu'
        # Check is relaxed because there is no torch 2.0 sliced-attention added-KV processor
__lowerCAmelCase : int = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=_SCREAMING_SNAKE_CASE )
@skip_mps
    def test_inference_batch_single_identical( self ):
__lowerCAmelCase : Union[str, Any] = torch_device == 'cpu'
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : Optional[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , additional_params_copy_to_batched_inputs=_SCREAMING_SNAKE_CASE , )
    def test_inference_batch_consistent( self ):
__lowerCAmelCase : List[Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
__lowerCAmelCase : str = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=_SCREAMING_SNAKE_CASE , additional_params_copy_to_batched_inputs=_SCREAMING_SNAKE_CASE , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=_SCREAMING_SNAKE_CASE )
@skip_mps
    def test_dict_tuple_outputs_equivalent( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
    def test_save_load_local( self ):
return super().test_save_load_local()
@skip_mps
    def test_save_load_optional_components( self ):
return super().test_save_load_optional_components()
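# Integration test: runs the published kakaobrain/karlo-v1-alpha image-variation checkpoint end to end on GPU and compares the 256x256 output against a stored reference.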
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_unclip_image_variation_karlo( self ):
__lowerCAmelCase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
__lowerCAmelCase : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
__lowerCAmelCase : int = UnCLIPImageVariationPipeline.from_pretrained(
            'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.float16 )
__lowerCAmelCase : List[str] = pipeline.to(_SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
__lowerCAmelCase : List[str] = pipeline(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , output_type='np' , )
__lowerCAmelCase : int = output.images[0]
assert image.shape == (2_56, 2_56, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 15 ) | 86 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
class __lowerCAmelCase ( tf.keras.Model ):
"""simple docstring"""
def __init__( self : Any , _snake_case : str ):
super().__init__()
__lowercase : str = tokenizer
__lowercase : Any = AutoConfig.from_pretrained(_snake_case )
__lowercase : Union[str, Any] = TFAutoModel.from_config(_snake_case )
def snake_case_ ( self : str , _snake_case : int ):
__lowercase : Optional[Any] = self.tokenizer(_snake_case )
__lowercase : int = self.bert(**_snake_case )
return out["pooler_output"]
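# The suite below checks that the in-graph TFBertTokenizer matches the Python BertTokenizer, survives tf.function compilation, and can be serialized inside a Keras model.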
@require_tf
@require_tensorflow_text
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
super().setUp()
__lowercase : Optional[int] = [
BertTokenizer.from_pretrained(_snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
__lowercase : Optional[Any] = [TFBertTokenizer.from_pretrained(_snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(_snake_case , use_fast_bert_tokenizer=_snake_case )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__lowercase : Optional[int] = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
__lowercase : Tuple = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def snake_case_ ( self : List[str] ):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
__lowercase : Dict = tokenizer(_snake_case , return_tensors='''tf''' , padding='''longest''' )
__lowercase : int = tf_tokenizer(_snake_case )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.int64 ) == tf_outputs[key] ) )
@slow
def snake_case_ ( self : Union[str, Any] ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase : Union[str, Any] = tf_tokenizer(self.paired_sentences )
__lowercase : List[str] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.int64 ) == separated_outputs[key] ) )
@slow
def snake_case_ ( self : Optional[Any] ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase : Any = tf.function(_snake_case )
for test_inputs in (self.test_sentences, self.paired_sentences):
__lowercase : List[Any] = tf.constant(_snake_case )
__lowercase : Any = compiled_tokenizer(_snake_case )
__lowercase : Union[str, Any] = tf_tokenizer(_snake_case )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def snake_case_ ( self : Tuple ):
for tf_tokenizer in self.tf_tokenizers:
__lowercase : Any = ModelToSave(tokenizer=_snake_case )
__lowercase : str = tf.convert_to_tensor(self.test_sentences )
__lowercase : Union[str, Any] = model(_snake_case ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__lowercase : Union[str, Any] = Path(_snake_case ) / '''saved.model'''
model.save(_snake_case )
__lowercase : List[str] = tf.keras.models.load_model(_snake_case )
__lowercase : Tuple = loaded_model(_snake_case )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
| 156 | 0 |
"""simple docstring"""
from timeit import timeit
test_data = {
"MALAYALAM": True,
"String": False,
"rotor": True,
"level": True,
"A": True,
"BB": True,
"ABC": False,
"amanaplanacanalpanama": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    """
    Return True if s is a palindrome, otherwise False (two-pointer scan).
    """
    start_i = 0
    end_i = len(s) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def is_palindrome_traversal(s: str) -> bool:
    """
    Return True if s is a palindrome, otherwise False.
    """
    end = len(s) // 2
    n = len(s)
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end ) )
def is_palindrome_recursive(s: str) -> bool:
    """
    Return True if s is a palindrome, otherwise False.
    """
    if len(s) <= 1:  # base case fixed from <= 2, which wrongly accepted strings like "abca"
        return True
    if s[0] == s[len(s) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def is_palindrome_slice(s: str) -> bool:
    """
    Return True if s is a palindrome, otherwise False.
    """
    return s == s[::-1]
def benchmark_function(name: str) -> None:
    """
    Time the named palindrome check against every entry in test_data.
    """
    stmt = f'all({name}(key) is value for key, value in test_data.items())'
    setup = f'from __main__ import test_data, {name}'
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
print(f'{name:<35} finished {number:,} runs in {result:.5f} seconds' )
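# Observed ordering in the timings below: slicing is fastest, then the two-pointer loop, then recursion, then the generator-based traversal.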
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f'{key:21} {value}')
print('''a man a plan a canal panama''')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('''is_palindrome_slice''')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('''is_palindrome''')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('''is_palindrome_recursive''')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('''is_palindrome_traversal''') | 350 |
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_multiple_size=4 , hidden_act="gelu" , hidden_dropout=0.0 , attention_dropout=0.1 , weight_tying=True , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config( self ):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self ):
        config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model( self , config , input_ids , input_mask ):
        model = GPTNeoXJapaneseModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask ):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , input_mask , token_labels ):
        model = GPTNeoXJapaneseForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=input_ids )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask ):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(input_ids , attention_mask=input_mask , use_cache=True )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask , output_hidden_states=True )
        output_from_no_past = output_from_no_past['''hidden_states'''][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['''hidden_states'''][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , input_mask , token_labels = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
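# The test class below combines the common model and pipeline test mixins; text generation is exercised separately in the slow integration test at the bottom of the file.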
@require_torch
class GPTNeoXModelJapaneseTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = GPTNeoXJapaneseModelTester(self )
        self.config_tester = ConfigTester(self , config_class=GPTNeoXJapaneseConfig , hidden_size=37 )
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_model( self ):
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , input_mask )
    def test_model_as_decoder( self ):
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )
    def test_model_as_decoder_with_default_input_mask( self ):
        # This regression test was failing with PyTorch < 1.3
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )
    def test_decoder_model_past_large_inputs( self ):
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config , input_ids , input_mask )
    def test_model_for_causal_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
@slow
    def test_generation( self ):
        model_id = '''abeja/gpt-neox-japanese-2.7b'''
        prompts = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
        EXPECTED_OUTPUTS = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id )
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id )
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt , return_tensors='''pt''' ).input_ids
            generated_ids = model.generate(input_ids , max_length=50 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS ) | 153 | 0 |
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
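# The MIDI utilities additionally depend on note_seq, so they get their own guarded import.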
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor | 97 |
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig( SageMakerConfig ):
"""simple docstring"""
_a = ComputeEnvironment.AMAZON_SAGEMAKER
_a = True
_a = 'ml.p3.2xlarge'
_a = 'accelerate_sagemaker_execution_role'
_a = 'hf-sm'
_a = 'us-east-1'
_a = 1
_a = 'accelerate-sagemaker-1'
_a = '1.6'
_a = '4.4'
_a = 'train.py'
    success_training_script_args = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
    fail_training_script_args = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def test_args_convert( self ):
'''simple docstring'''
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
        assert isinstance(converted_args['''model_name_or_path'''] , str )
        assert isinstance(converted_args['''do_train'''] , bool )
        assert isinstance(converted_args['''epochs'''] , int )
        assert isinstance(converted_args['''learning_rate'''] , float )
        assert isinstance(converted_args['''max_steps'''] , float )
        with pytest.raises(ValueError ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args ) | 97 | 1 |
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """
    Return a peak value of lst, found by divide and conquer.
    """
    # middle index
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
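# Note: peak() assumes a unimodal input (values strictly increase then strictly decrease);
# each midpoint comparison then discards half the list, giving O(log n) comparisons.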
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
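# Keys in MAPPING that already live at the top level of the HF model; every other target gets prefixed with "unispeech_sat." during conversion.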
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
snake_case : Dict = value
elif weight_type == "weight_g":
snake_case : Optional[int] = value
elif weight_type == "weight_v":
snake_case : Optional[int] = value
elif weight_type == "bias":
snake_case : Tuple = value
else:
snake_case : Optional[int] = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights( fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
continue
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
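# Copies one fairseq feature-extractor conv weight/bias into the HF feature extractor, dispatching on the layer type id (0 = conv layer, 2 = layer norm).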
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__lowerCamelCase )
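# Entry point: builds a UniSpeechSat config (optionally from a JSON file), instantiates a CTC or pretraining head, loads the fairseq checkpoint, and maps its weights across.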
@torch.no_grad()
def convert_unispeech_sat_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    dict_path = ""
    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config )
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model , hf_wav2vec )
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCamelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 10 | 0 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
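# Fast tests: every scheduler variant below reuses the same seeded 128x128 dummy input and checks a fixed 3x3 corner slice of the upscaled 512x512 output.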
class A_ ( OnnxPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase : Dict = """ssube/stable-diffusion-x4-upscaler-onnx"""
def SCREAMING_SNAKE_CASE__ ( self , snake_case=0 ):
lowercase = floats_tensor((1, 3, 128, 128) , rng=random.Random(snake_case ) )
lowercase = torch.manual_seed(snake_case )
lowercase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=snake_case )
lowercase = self.get_dummy_inputs()
lowercase = pipe(**snake_case ).images
lowercase = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
lowercase = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=snake_case )
pipe.set_progress_bar_config(disable=snake_case )
lowercase = self.get_dummy_inputs()
lowercase = pipe(**snake_case ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase = np.array(
[0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case )
lowercase = self.get_dummy_inputs()
lowercase = pipe(**snake_case ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase = np.array(
[0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case )
lowercase = self.get_dummy_inputs()
lowercase = pipe(**snake_case ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case )
lowercase = self.get_dummy_inputs()
lowercase = pipe(**snake_case ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase = np.array(
[0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = ort.SessionOptions()
lowercase = False
return options
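    # End-to-end upscale of a real 128x128 image; ONNX Runtime is not fully deterministic on GPU, hence the loose 2e-2 tolerance.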
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
lowercase = init_image.resize((128, 128) )
# using the PNDM scheduler by default
lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case )
lowercase = 'A fantasy landscape, trending on artstation'
lowercase = torch.manual_seed(0 )
lowercase = pipe(
prompt=snake_case , image=snake_case , guidance_scale=7.5 , num_inference_steps=10 , generator=snake_case , output_type='np' , )
lowercase = output.images
lowercase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
lowercase = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
lowercase = init_image.resize((128, 128) )
lowercase = LMSDiscreteScheduler.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , subfolder='scheduler' )
lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' , scheduler=snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case )
lowercase = 'A fantasy landscape, trending on artstation'
lowercase = torch.manual_seed(0 )
lowercase = pipe(
prompt=snake_case , image=snake_case , guidance_scale=7.5 , num_inference_steps=20 , generator=snake_case , output_type='np' , )
lowercase = output.images
lowercase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
lowercase = np.array(
[0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 195 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
UpperCAmelCase = '''Create a default config file for Accelerate with only a few flags set.'''
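# write_basic_config picks the distributed type automatically: MULTI_GPU / MULTI_XPU / MULTI_NPU when more than one accelerator is visible, otherwise NO (plain CPU falls back to a single process).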
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE="no" , __SCREAMING_SNAKE_CASE = default_json_config_file , __SCREAMING_SNAKE_CASE = False ):
lowercase = Path(__SCREAMING_SNAKE_CASE )
path.parent.mkdir(parents=__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
if path.exists():
print(
F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
lowercase = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
lowercase = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
lowercase = torch.cuda.device_count()
lowercase = num_gpus
lowercase = False
if num_gpus > 1:
lowercase = 'MULTI_GPU'
else:
lowercase = 'NO'
elif is_xpu_available() and use_xpu:
lowercase = torch.xpu.device_count()
lowercase = num_xpus
lowercase = False
if num_xpus > 1:
lowercase = 'MULTI_XPU'
else:
lowercase = 'NO'
elif is_npu_available():
lowercase = torch.npu.device_count()
lowercase = num_npus
lowercase = False
if num_npus > 1:
lowercase = 'MULTI_NPU'
else:
lowercase = 'NO'
else:
lowercase = 0
lowercase = True
lowercase = 1
lowercase = 'NO'
lowercase = ClusterConfig(**__SCREAMING_SNAKE_CASE )
config.to_json_file(__SCREAMING_SNAKE_CASE )
return path
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = parser.add_parser('default' , parents=__SCREAMING_SNAKE_CASE , help=__SCREAMING_SNAKE_CASE , formatter_class=__SCREAMING_SNAKE_CASE )
parser.add_argument(
'--config_file' , default=__SCREAMING_SNAKE_CASE , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=__SCREAMING_SNAKE_CASE , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
return parser
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
lowercase = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'''accelerate configuration saved at {config_file}''' )
| 195 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
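# MGP-STR is a ViT-style scene-text recognizer; the config mirrors ViT hyperparameters plus the sizes of its three output vocabularies (character / BPE / wordpiece).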
class MgpstrConfig( PretrainedConfig ):
"""simple docstring"""
    model_type = '''mgp-str'''
    def __init__( self , image_size=[32, 1_28] , patch_size=4 , num_channels=3 , max_token_length=27 , num_character_labels=38 , num_bpe_labels=5_02_57 , num_wordpiece_labels=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , mlp_ratio=4.0 , qkv_bias=True , distilled=False , layer_norm_eps=1e-5 , drop_rate=0.0 , attn_drop_rate=0.0 , drop_path_rate=0.0 , output_a3_attentions=False , initializer_range=0.02 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 355 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config( model_name ):
    '''simple docstring'''
    depths = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if '''large''' in model_name or '''huge''' in model_name else False
    use_post_layernorm = True if '''large''' in model_name or '''huge''' in model_name else False
    use_layerscale = True if '''large''' in model_name or '''huge''' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 1_28
    elif "large" in model_name:
        embed_dim = 1_92
    elif "xlarge" in model_name:
        embed_dim = 2_56
    elif "huge" in model_name:
        embed_dim = 3_52
    # set label information
    repo_id = '''huggingface/label-files'''
    if "large" in model_name or "huge" in model_name:
        filename = '''imagenet-22k-id2label.json'''
    else:
        filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=id2label , label2id=label2id , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
return config
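# Maps the original checkpoint's parameter names onto the HF FocalNet module hierarchy, one substring rewrite at a time.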
def rename_key( name ):
'''simple docstring'''
if "patch_embed.proj" in name:
_A = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_A = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
_A = '''encoder.''' + name
if "encoder.layers" in name:
_A = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
_A = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
_A = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
_A = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
_A = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
_A = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
_A = '''layernorm.weight'''
if name == "norm.bias":
_A = '''layernorm.bias'''
if "head" in name:
_A = name.replace('''head''' , '''classifier''' )
else:
_A = '''focalnet.''' + name
return name
def convert_focalnet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub=False ):
'''simple docstring'''
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print('''Checkpoint URL: ''' , checkpoint_url )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' )['''model''']
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    config = get_focalnet_config(model_name )
    model = FocalNetForImageClassification(config )
    model.eval()
    # load state dict
    model.load_state_dict(state_dict )
# verify conversion
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    processor = BitImageProcessor(
        do_resize=True , size={'''shortest_edge''': 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=2_24 , do_normalize=True , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = processor(images=image , return_tensors='''pt''' )
    image_transforms = transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1e-4 )
    outputs = model(**inputs )
    predicted_class_idx = outputs.logits.argmax(-1 ).item()
    print('''Predicted class:''' , model.config.id2label[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
_A = torch.tensor([0.21_66, -0.43_68, 0.21_91] )
elif model_name == "focalnet-tiny-lrf":
_A = torch.tensor([1.16_69, 0.01_25, -0.16_95] )
elif model_name == "focalnet-small":
_A = torch.tensor([0.49_17, -0.04_30, 0.13_41] )
elif model_name == "focalnet-small-lrf":
_A = torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
elif model_name == "focalnet-base":
_A = torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
elif model_name == "focalnet-base-lrf":
_A = torch.tensor([0.53_06, -0.04_83, -0.39_28] )
assert torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(f"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(f"""{model_name}""" )
processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
__A = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 75 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a : List[Any] = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
a : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 114 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
a : int = logging.getLogger(__name__)
@dataclass
class a :
"""simple docstring"""
a : Optional[int] = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
a : bool = field(
default=lowercase__ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
a : bool = field(
default=lowercase__ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
a : Optional[int] = field(
default=lowercase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
a : Optional[int] = field(
default=lowercase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
a : Optional[int] = field(
default=lowercase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
@dataclass
class ModelArguments:
    """simple docstring"""

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False, metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        }, )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}, )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        else:
            train_dataset = load_dataset(
                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = train_dataset.features["label"].names
    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = eval_dataset.features["label"].names
    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = predict_dataset.features["label"].names
    # Labels
    num_labels = len(label_list)
    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="xnli", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True, )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", )
    # Get the metric function
    metric = evaluate.load("xnli")
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
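# ---- Illustrative usage (not part of the original script) ----
# A hypothetical command line for this XNLI example; the flags mirror the
# dataclass fields defined above, but the concrete values are only a sketch:
#
# python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de --train_language en \
#     --do_train --do_eval \
#     --max_seq_length 128 \
#     --output_dir /tmp/debug_xnli --overwrite_output_dir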
| 114 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}
class RetriBertConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "retribert"
    def __init__( self, vocab_size=30_522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs, ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
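# ---- Illustrative usage (not part of the original file) ----
# A minimal sketch of constructing the config above with one field overridden:
#
# config = RetriBertConfig(projection_dim=256)
# assert config.hidden_size == 768 and config.projection_dim == 256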
| 118 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    '''simple docstring'''
    def __init__( self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs, ):
        '''simple docstring'''
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs, )
    def read( self ):
        '''simple docstring'''
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory )
        return dataset
class SqlDatasetWriter:
    '''simple docstring'''
    def __init__( self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs, ):
        '''simple docstring'''
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs
    def write( self ):
        '''simple docstring'''
        _ = self.to_sql_kwargs.pop('sql', None)
        _ = self.to_sql_kwargs.pop('con', None)
        index = self.to_sql_kwargs.pop('index', False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written
    def _batch_sql( self, args ):
        '''simple docstring'''
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)
    def _write( self, index, **to_sql_kwargs ):
        '''simple docstring'''
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating SQL from Arrow format', ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_written in logging.tqdm(
                    pool.imap(
                        self._batch_sql, [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating SQL from Arrow format', ):
                    written += num_rows_written
        return written
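# ---- Illustrative usage (not part of the original file) ----
# A minimal round-trip sketch, assuming an in-memory sqlite3 connection; the
# public `Dataset.to_sql(...)` / `Dataset.from_sql(...)` entry points wrap the
# writer/reader classes above:
#
# import sqlite3
# from datasets import Dataset
# con = sqlite3.connect(":memory:")
# ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
# ds.to_sql("data", con)
# ds2 = Dataset.from_sql("SELECT * FROM data", con)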
| 118 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base( self ):
        '''simple docstring'''
        model = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
        tokenizer = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
        text = '''The dog is cute and lives in the garden house'''
        input_ids = jnp.array([tokenizer.encode(text)] )
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        output = model(input_ids)['''last_hidden_state''']
        self.assertEqual(output.shape, expected_output_shape )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3 ) )
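# ---- Illustrative note (not part of the original test) ----
# The test above is gated by @slow; a sketch of running it, assuming the usual
# transformers test layout and that the checkpoint can be downloaded:
#
# RUN_SLOW=1 pytest tests/models/xlm_roberta/test_modeling_flax_xlm_roberta.py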
| 34 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""TimmBackbone"""]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 330 | 0 |
'''simple docstring'''
from timeit import timeit
test_data = {
'''MALAYALAM''': True,
'''String''': False,
'''rotor''': True,
'''level''': True,
'''A''': True,
'''BB''': True,
'''ABC''': False,
'''amanaplanacanalpanama''': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def __lowercase ( __lowercase ) -> Any:
'''simple docstring'''
_A = 0
_A = len(SCREAMING_SNAKE_CASE_ ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def __lowercase ( __lowercase ) -> Optional[int]:
'''simple docstring'''
_A = len(SCREAMING_SNAKE_CASE_ ) // 2
_A = len(SCREAMING_SNAKE_CASE_ )
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
return all(s[i] == s[n - i - 1] for i in range(SCREAMING_SNAKE_CASE_ ) )
def __lowercase ( __lowercase ) -> Optional[Any]:
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE_ ) <= 2:
return True
if s[0] == s[len(SCREAMING_SNAKE_CASE_ ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def __lowercase ( __lowercase ) -> str:
'''simple docstring'''
return s == s[::-1]
def __lowercase ( __lowercase ) -> Optional[int]:
'''simple docstring'''
_A = F'''all({name}(key) is value for key, value in test_data.items())'''
_A = F'''from __main__ import test_data, {name}'''
_A = 50_0000
_A = timeit(stmt=SCREAMING_SNAKE_CASE_ , setup=SCREAMING_SNAKE_CASE_ , number=SCREAMING_SNAKE_CASE_ )
print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"""{key:21} {value}""")
print('''a man a plan a canal panama''')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('''is_palindrome_slice''')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('''is_palindrome''')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('''is_palindrome_recursive''')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('''is_palindrome_traversal''')
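# ---- Illustrative note (not part of the original benchmark) ----
# The slice variant wins because `s[::-1]` runs entirely in C; a quick
# standalone check with timeit (numbers are machine-dependent):
#
# from timeit import timeit
# timeit("s == s[::-1]", setup="s = 'amanaplanacanalpanama'")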
| 364 |
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    """simple docstring"""
    def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, size=None, do_normalize=True, do_convert_rgb=True, patch_size=None, ):
        '''simple docstring'''
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
    def prepare_image_processor_dict( self ):
        '''simple docstring'''
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image( self ):
        '''simple docstring'''
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True ).raw ).convert("RGB" )
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = Pix2StructImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
'''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor, "do_normalize" ) )
        self.assertTrue(hasattr(image_processor, "do_convert_rgb" ) )
    def test_expected_patches( self ):
'''simple docstring'''
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict )
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch )
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606 ), atol=1E-3, rtol=1E-3 ) )
    def test_call_pil( self ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
_A = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_A = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
__UpperCAmelCase , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_vqa( self ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
_A = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
_A = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(__UpperCAmelCase ):
_A = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
_A = "Hello"
_A = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__UpperCAmelCase , header_text=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
__UpperCAmelCase , return_tensors="pt" , max_patches=__UpperCAmelCase , header_text=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_numpy( self ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
_A = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_A = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
__UpperCAmelCase , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_pytorch( self ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
_A = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_A = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
__UpperCAmelCase , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4 )
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
'''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor, "do_normalize" ) )
        self.assertTrue(hasattr(image_processor, "do_convert_rgb" ) )
    def test_call_pil_four_channels( self ):
'''simple docstring'''
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
_A = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_A = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_A = image_processor(
__UpperCAmelCase , return_tensors="pt" , max_patches=__UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
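# ---- Illustrative note (not part of the original tests) ----
# The expected_hidden_dim arithmetic used in the tests above, spelled out:
# each flattened patch stores patch_h * patch_w * channels pixel values plus
# two row/column coordinate entries. With the tester defaults:
#
# patch_h, patch_w, channels = 16, 16, 3
# expected_hidden_dim = patch_h * patch_w * channels + 2  # = 770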
| 174 | 0 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    '''simple docstring'''
    def __lt__( self, other ) -> bool:
        return self[-1] < other[-1]
    def __eq__( self, other ) -> bool:
        return self[-1] == other[-1]
def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element] )
        i = bisect_left(stacks, new_stack )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stack )
    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks) )
    return collection
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(patience_sort(unsorted))
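# ---- Illustrative trace (not part of the original file) ----
# A worked example, as a sketch: patience_sort([5, 1, 4, 2, 3]) first builds
# the piles [5, 1], [4, 2] and [3] (each new element lands on the leftmost
# pile whose top is at least as large, else starts a new pile); reversing
# each pile yields sorted runs that heapq.merge interleaves into [1, 2, 3, 4, 5].
#
# assert patience_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]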
| 58 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def lowerCamelCase ( __lowerCamelCase : str ) ->str:
if not sentence:
return ""
_SCREAMING_SNAKE_CASE = dict(zip(__lowerCamelCase , __lowerCamelCase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
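# ---- Illustrative usage (not part of the original file) ----
# assert capitalize("hello world") == "Hello world"
# assert capitalize("123 hello") == "123 hello"  # non-letter first chars pass through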
| 58 | 1 |
"""simple docstring"""
import argparse
import os
import re
UpperCAmelCase__ = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
UpperCAmelCase__ = re.compile(r"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
UpperCAmelCase__ = re.compile(r"""\s*\(\s*\"(\S[^\"]+)\"""")
def sort_auto_mapping(fname, overwrite=False):
    """simple docstring"""
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()
    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1
    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite=False):
    """simple docstring"""
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]
    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"""The following files have auto mappings that need sorting: {", ".join(failures)}. Run `make style` to fix"""
            """ this.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
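# ---- Illustrative note (not part of the original script) ----
# A sketch of the kind of block the sorter reorders: entries inside a matching
# OrderedDict mapping are sorted by the quoted model identifier, e.g.
#
# MODEL_MAPPING_NAMES = OrderedDict(
#     [
#         ("albert", "AlbertModel"),
#         ("bert", "BertModel"),
#     ]
# )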
| 30 | """simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """simple docstring"""
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(("""cls_token""", """vit.embeddings.cls_token""") )
rename_keys.append(("""pos_embed""", """vit.embeddings.position_embeddings""") )
rename_keys.append(("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias""") )
# backbone
rename_keys.append(("""patch_embed.backbone.stem.conv.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.weight""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight""") )
rename_keys.append(("""patch_embed.backbone.stem.norm.bias""", """vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias""") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((f'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', f'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """simple docstring"""
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k, None )
def rename_key(dct, old, new):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """simple docstring"""
    backbone_config = BitConfig(
        global_padding="""same""", layer_type="""bottleneck""", depths=(3, 4, 9), out_features=["""stage3"""], embedding_dynamic_padding=True, )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000 )
    base_model = False
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config, base_model )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_q_k_v(state_dict, config, base_model )
_UpperCAmelCase = """huggingface/label-files"""
_UpperCAmelCase = """imagenet-1k-id2label.json"""
_UpperCAmelCase = json.load(open(hf_hub_download(lowercase ,lowercase ,repo_type="""dataset""" ) ,"""r""" ) )
_UpperCAmelCase = {int(lowercase ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
_UpperCAmelCase = ViTHybridModel(lowercase ).eval()
else:
_UpperCAmelCase = ViTHybridForImageClassification(lowercase ).eval()
model.load_state_dict(lowercase )
# create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        """bilinear""": PILImageResampling.BILINEAR,
        """bicubic""": PILImageResampling.BICUBIC,
        """nearest""": PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True, size={"""shortest_edge""": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image, return_tensors="""pt""" ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
print("""Predicted class:""" ,logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1E-3 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model and processor to the hub {vit_name}''' )
        model.push_to_hub(f'''ybelkada/{vit_name}''' )
        processor.push_to_hub(f'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
UpperCAmelCase__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
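# ---- Illustrative usage (not part of the original script) ----
# A hypothetical invocation (the script filename is assumed); the default
# checkpoint name matches the argparse default above:
#
# python convert_vit_hybrid_timm_to_pytorch.py \
#     --vit_name vit_base_r50_s16_384 \
#     --pytorch_dump_folder_path /tmp/vit_hybrid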
| 30 | 1 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict=None ) -> int:
lowercase_ : Any = None
if token is not None:
lowercase_ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
lowercase_ : str = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
lowercase_ : Tuple = requests.get(UpperCAmelCase__ , headers=UpperCAmelCase__ ).json()
lowercase_ : List[str] = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
lowercase_ : List[str] = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(UpperCAmelCase__ ):
lowercase_ : Union[str, Any] = requests.get(url + F'''&page={i + 2}''' , headers=UpperCAmelCase__ ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any=None ) -> int:
lowercase_ : List[Any] = None
if token is not None:
lowercase_ : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
lowercase_ : List[Any] = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'''
lowercase_ : List[Any] = requests.get(UpperCAmelCase__ , headers=UpperCAmelCase__ ).json()
lowercase_ : str = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
lowercase_ : Any = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(UpperCAmelCase__ ):
lowercase_ : Any = requests.get(url + F'''&page={i + 2}''' , headers=UpperCAmelCase__ ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str ) -> Optional[int]:
lowercase_ : str = None
if token is not None:
lowercase_ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
lowercase_ : Dict = requests.get(UpperCAmelCase__ , headers=UpperCAmelCase__ , allow_redirects=UpperCAmelCase__ )
lowercase_ : List[str] = result.headers["""Location"""]
lowercase_ : Tuple = requests.get(UpperCAmelCase__ , allow_redirects=UpperCAmelCase__ )
lowercase_ : Optional[Any] = os.path.join(UpperCAmelCase__ , F'''{artifact_name}.zip''' )
with open(UpperCAmelCase__ , """wb""" ) as fp:
fp.write(response.content )
def lowerCamelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict=None ) -> Optional[int]:
lowercase_ : int = []
lowercase_ : List[Any] = []
lowercase_ : Tuple = None
with zipfile.ZipFile(UpperCAmelCase__ ) as z:
for filename in z.namelist():
if not os.path.isdir(UpperCAmelCase__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(UpperCAmelCase__ ) as f:
for line in f:
lowercase_ : List[str] = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowercase_ : List[str] = line[: line.index(""": """ )]
lowercase_ : int = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
lowercase_ : int = line[len("""FAILED """ ) :]
failed_tests.append(UpperCAmelCase__ )
elif filename == "job_name.txt":
lowercase_ : List[Any] = line
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError(
F'''`errors` and `failed_tests` should have the same number of elements. Got {len(UpperCAmelCase__ )} for `errors` '''
F'''and {len(UpperCAmelCase__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
""" problem.""" )
lowercase_ : Optional[int] = None
if job_name and job_links:
lowercase_ : Tuple = job_links.get(UpperCAmelCase__ , UpperCAmelCase__ )
# A list with elements of the form (line of error, error, failed test)
lowercase_ : Tuple = [x + [y] + [job_link] for x, y in zip(UpperCAmelCase__ , UpperCAmelCase__ )]
return result
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any]=None ) -> List[Any]:
lowercase_ : Optional[int] = []
lowercase_ : List[str] = [os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) for p in os.listdir(UpperCAmelCase__ ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(UpperCAmelCase__ , job_links=UpperCAmelCase__ ) )
return errors
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any]=None ) -> int:
lowercase_ : Any = Counter()
counter.update([x[1] for x in logs] )
lowercase_ : Tuple = counter.most_common()
lowercase_ : Dict = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowercase_ : Any = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
lowercase_ : Union[str, Any] = dict(sorted(r.items() , key=lambda UpperCAmelCase__ : item[1]["count"] , reverse=UpperCAmelCase__ ) )
return r
def lowerCamelCase ( UpperCAmelCase__ : str ) -> List[str]:
lowercase_ : Optional[int] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
lowercase_ : Optional[Any] = test.split("""/""" )[2]
else:
lowercase_ : int = None
return test
def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any]=None ) -> Optional[int]:
lowercase_ : Union[str, Any] = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowercase_ : Optional[Any] = [x for x in logs if x[2] is not None]
lowercase_ : str = {x[2] for x in logs}
lowercase_ : Optional[Any] = {}
for test in tests:
lowercase_ : Optional[int] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowercase_ : Dict = counter.most_common()
lowercase_ : List[str] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowercase_ : List[Any] = sum(error_counts.values() )
if n_errors > 0:
lowercase_ : int = {"""count""": n_errors, """errors""": error_counts}
lowercase_ : List[Any] = dict(sorted(r.items() , key=lambda UpperCAmelCase__ : item[1]["count"] , reverse=UpperCAmelCase__ ) )
return r
def lowerCamelCase ( UpperCAmelCase__ : int ) -> Tuple:
lowercase_ : List[Any] = """| no. | error | status |"""
lowercase_ : Any = """|-:|:-|:-|"""
lowercase_ : Any = [header, sep]
for error in reduced_by_error:
lowercase_ : Optional[Any] = reduced_by_error[error]["""count"""]
lowercase_ : List[Any] = F'''| {count} | {error[:100]} | |'''
lines.append(UpperCAmelCase__ )
return "\n".join(UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : List[str] ) -> Dict:
lowercase_ : Union[str, Any] = """| model | no. of errors | major error | count |"""
lowercase_ : Optional[int] = """|-:|-:|-:|-:|"""
lowercase_ : Optional[int] = [header, sep]
for model in reduced_by_model:
lowercase_ : Dict = reduced_by_model[model]["""count"""]
lowercase_ , lowercase_ : str = list(reduced_by_model[model]["""errors"""].items() )[0]
lowercase_ : Optional[int] = F'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(UpperCAmelCase__ )
return "\n".join(UpperCAmelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)
    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])
    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)
    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
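# ---- Illustrative usage (not part of the original script) ----
# A hypothetical invocation (the script filename is assumed); the workflow run
# id is the numeric id in a GitHub Actions URL, and the token needs
# actions:read scope:
#
# python get_ci_error_statistics.py \
#     --workflow_run_id 1234567890 \
#     --output_dir ci_artifacts \
#     --token "$GH_TOKEN"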
| 239 |
'''simple docstring'''
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()
    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True
    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)
    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)
    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class DeepSpeedEngineWrapper:
    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class DummyOptim:
    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
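# A minimal usage sketch for the config wrapper above (the dict content is illustrative):
#
#   cfg = HfDeepSpeedConfig({"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}})
#   cfg.is_zero3()    # True, since zero_optimization.stage == 3
#   cfg.is_offload()  # True, since "cpu" is one of the valid offload devices
#
# A path to a JSON file or a base64-encoded JSON string is accepted the same way.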
| 239 | 1 |
def different_signs(num1: int, num2: int) -> bool:
    return num1 ^ num2 < 0
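# Quick sanity checks of the XOR sign trick (values illustrative): Python's bitwise ^
# binds tighter than <, so the expression reads (num1 ^ num2) < 0, which is negative
# exactly when the two sign bits differ.
#
#   different_signs(1, -1)   # True  (1 ^ -1 == -2)
#   different_signs(1, 1)    # False
#   different_signs(-1, -1)  # False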
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
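# For intuition (numbers illustrative): a 1,000-character file that tokenizes into 250
# input ids yields ratio_char_token == 4.0; unusually low ratios tend to flag files the
# tokenizer compresses poorly.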
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 261 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
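# A short sketch of what the `_LazyModule` registered above does (import path assumed):
# submodules are only imported when one of their attributes is first touched.
#
#   from transformers.models import xlnet
#   config_cls = xlnet.XLNetConfig  # only now is configuration_xlnet actually imported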
| 267 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """Cleans the model documentation table of content, removing duplicate entries and sorting by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
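# Example of the cleanup above (hypothetical toc entries): exact duplicates collapse into
# one entry and the result comes back sorted by title.
#
#   clean_model_doc_toc([
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/bert", "title": "BERT"},
#       {"local": "model_doc/albert", "title": "ALBERT"},
#   ])
#   # -> [{"local": "model_doc/albert", "title": "ALBERT"}, {"local": "model_doc/bert", "title": "BERT"}]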
def check_model_doc(overwrite=False):
    """Checks (and optionally fixes) that the model documentation table of content is clean."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1

    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 253 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
@lru_cache()
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, avoiding the
    whitespace/control characters that a BPE vocabulary cannot contain.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
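# e.g. get_pairs(("l", "o", "w")) -> {("l", "o"), ("o", "w")}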
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>",
                 sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
                 add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
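    # Worked example (assuming a merges file containing "l o" followed by "lo w"):
    #   bpe("low"): ("l", "o", "w") -> ("lo", "w") -> ("low",) -> "low"
    # The loop stops as soon as no remaining pair appears in bpe_ranks.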
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
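# Usage sketch (requires the pretrained vocab/merges files to be available; model id illustrative):
#
#   tok = BartTokenizer.from_pretrained("facebook/bart-base")
#   tok.tokenize("Hello world")  # -> ["Hello", "Ġworld"]; "Ġ" marks a leading space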
| 351 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(self, feature_size=80, sampling_rate=16000, hop_length=160, chunk_length=30, n_fft=400,
                 padding_value=0.0, return_attention_mask=False, **kwargs):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0,
            max_frequency=8000.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft,
            hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
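    # Normalization sketch: within each valid region x[:length] the slice becomes
    # (x - mean) / sqrt(var + 1e-7); positions beyond `length` are reset to `padding_value`.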
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech, padding=padding, max_length=max_length if max_length else self.n_samples,
            truncation=truncation, pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"], attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
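# Usage sketch (assumes 16 kHz mono audio in a float numpy array named `audio`):
#
#   fe = WhisperFeatureExtractor()
#   feats = fe(audio, sampling_rate=16000, return_tensors="np")
#   feats["input_features"].shape  # (1, 80, 3000): 80 mel bins x 30 s of 10 ms frames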
| 218 | 0 |
"""simple docstring"""
def multiplicative_persistence(num: int) -> int:
    """
    Return the number of times the digits of `num` must be multiplied together
    before a single digit remains.
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """
    Return the number of times the digits of `num` must be summed
    before a single digit remains.
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
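# Worked chains (verifiable by hand):
#   multiplicative_persistence(39) == 3   # 39 -> 27 -> 14 -> 4
#   additive_persistence(39) == 2         # 39 -> 12 -> 3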
if __name__ == "__main__":
import doctest
doctest.testmod()
| 155 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 155 | 1 |
'''simple docstring'''
deps = {
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cookiecutter==1.7.3''',
'''dataclasses''': '''dataclasses''',
'''datasets''': '''datasets!=2.5.0''',
'''decord''': '''decord==0.6.0''',
'''deepspeed''': '''deepspeed>=0.9.3''',
'''diffusers''': '''diffusers''',
'''dill''': '''dill<0.3.5''',
'''evaluate''': '''evaluate>=0.2.0''',
'''fairscale''': '''fairscale>0.3''',
'''faiss-cpu''': '''faiss-cpu''',
'''fastapi''': '''fastapi''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1,<=0.7.0''',
'''ftfy''': '''ftfy''',
'''fugashi''': '''fugashi>=1.0''',
'''GitPython''': '''GitPython<3.1.19''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''ipadic''': '''ipadic>=1.0.0,<2.0''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
'''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
'''jieba''': '''jieba''',
'''kenlm''': '''kenlm''',
'''keras-nlp''': '''keras-nlp>=0.3.1''',
'''librosa''': '''librosa''',
'''nltk''': '''nltk''',
'''natten''': '''natten>=0.14.6''',
'''numpy''': '''numpy>=1.17''',
'''onnxconverter-common''': '''onnxconverter-common''',
'''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
'''onnxruntime''': '''onnxruntime>=1.4.0''',
'''opencv-python''': '''opencv-python''',
'''optuna''': '''optuna''',
'''optax''': '''optax>=0.0.8,<=0.1.4''',
'''packaging''': '''packaging>=20.0''',
'''parameterized''': '''parameterized''',
'''phonemizer''': '''phonemizer''',
'''protobuf''': '''protobuf''',
'''psutil''': '''psutil''',
'''pyyaml''': '''pyyaml>=5.1''',
'''pydantic''': '''pydantic<2''',
'''pytest''': '''pytest>=7.2.0''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''python''': '''python>=3.8.0''',
'''ray[tune]''': '''ray[tune]''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
'''rjieba''': '''rjieba''',
'''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
'''ruff''': '''ruff>=0.0.241,<=0.0.259''',
'''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
'''sacremoses''': '''sacremoses''',
'''safetensors''': '''safetensors>=0.3.1''',
'''sagemaker''': '''sagemaker>=2.31.0''',
'''scikit-learn''': '''scikit-learn''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''sigopt''': '''sigopt''',
'''starlette''': '''starlette''',
'''sudachipy''': '''sudachipy>=0.6.6''',
'''sudachidict_core''': '''sudachidict_core>=20220729''',
'''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
'''tensorflow''': '''tensorflow>=2.6,<2.14''',
'''tensorflow-text''': '''tensorflow-text<2.14''',
'''tf2onnx''': '''tf2onnx''',
'''timeout-decorator''': '''timeout-decorator''',
'''timm''': '''timm''',
'''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
'''torch''': '''torch>=1.9,!=1.12.0''',
'''torchaudio''': '''torchaudio''',
'''torchvision''': '''torchvision''',
'''pyctcdecode''': '''pyctcdecode>=0.4.0''',
'''tqdm''': '''tqdm>=4.27''',
'''unidic''': '''unidic>=1.0.2''',
'''unidic_lite''': '''unidic_lite>=1.0.7''',
'''urllib3''': '''urllib3<2.0.0''',
'''uvicorn''': '''uvicorn''',
}
| 217 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None,
                 resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True,
                 crop_size: Optional[Dict[str, int]] = None, do_rescale: bool = True,
                 rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True,
                 image_mean: Optional[Union[float, List[float]]] = None,
                 image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int],
               resample: PILImageResampling = PILImageResampling.BICUBIC,
               data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int],
                    data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float],
                data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]],
                  data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None,
                   resample: Optional[PILImageResampling] = None, do_center_crop: Optional[bool] = None,
                   crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None,
                   rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None,
                   image_mean: Optional[Union[float, List[float]]] = None,
                   image_std: Optional[Union[float, List[float]]] = None,
                   return_tensors: Optional[Union[str, TensorType]] = None,
                   data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 217 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict
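# One full rename derived from the rules above (with encoder_only=False):
#   "backbone.patch_embed1.proj.weight" -> "segformer.encoder.patch_embeddings.0.proj.weight"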
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
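# Shape sketch: the fused kv.weight has shape (2 * hidden_size, hidden_size); rows
# [:hidden_size] become the key projection and rows [hidden_size:] the value projection,
# and kv.bias splits the same way.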
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")
    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
logger.info(F"""Converting model {model_name}...""" )
    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.58_20, -8.72_31, -8.32_15], [-8.06_00, -10.35_29, -10.03_04], [-7.52_08, -9.41_03, -9.62_39]],
[[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]],
[[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]],
[[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]],
[[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.08_78, -10.20_81, -10.18_91], [-9.31_44, -10.79_41, -10.98_43], [-9.22_94, -10.38_55, -10.57_04]],
[[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]],
[[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]],
[[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]],
[[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.55_24, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.58_42, -12.88_51, -13.94_14]],
[[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]],
[[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]],
[[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]],
[[0.53_74, 0.10_67, -0.47_42], [0.11_41, -0.22_55, -0.70_99], [-0.30_00, -0.59_24, -1.31_05]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.82_17, -9.87_67, -10.17_17], [-9.44_38, -10.90_58, -11.40_47], [-9.79_39, -12.34_95, -12.10_79]],
[[-7.15_14, -9.53_36, -10.08_60], [-9.77_76, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]],
[[0.30_21, 0.08_05, -0.23_10], [-0.03_28, -0.16_05, -0.27_14], [-0.14_08, -0.54_77, -0.69_76]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1372E01, -1.2787E01, -1.3477E01],
[-1.2536E01, -1.4194E01, -1.4409E01],
[-1.3217E01, -1.4888E01, -1.5327E01],
],
[
[-1.4791E01, -1.7122E01, -1.8277E01],
[-1.7163E01, -1.9192E01, -1.9533E01],
[-1.7897E01, -1.9991E01, -2.0315E01],
],
[
[7.6723E-01, 4.1921E-01, -7.7878E-02],
[4.7772E-01, 9.5557E-03, -2.8082E-01],
[3.6032E-01, -2.4826E-01, -5.1168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.49_59, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]],
[[-9.89_05, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]],
[[0.22_13, 0.01_92, -0.24_66], [-0.17_31, -0.42_13, -0.48_74], [-0.31_26, -0.65_41, -1.13_89]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]],
[[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]],
[[-4.51_78, -5.50_37, -6.51_09], [-5.08_84, -7.21_74, -8.03_34], [-4.41_56, -5.81_17, -7.29_70]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]],
[[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]],
[[-4.73_49, -4.95_88, -5.09_66], [-4.32_10, -6.93_25, -7.25_91], [-3.43_12, -4.74_84, -7.19_17]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]],
[[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]],
[[1.04_91, 0.82_89, 1.03_10], [1.10_44, 0.52_19, 0.80_55], [1.08_99, 0.69_26, 0.55_90]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
_UpperCAmelCase = torch.tensor(
[
[[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]],
[[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]],
[[-1.79_90, -2.09_51, -1.77_84], [-2.63_97, -3.82_45, -3.96_86], [-1.52_64, -2.81_26, -2.93_16]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], _UpperCAmelCase, atol=1E-2)
# finally, save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(__lowerCAmelCase).mkdir(exist_ok=True)
model.save_pretrained(__lowerCAmelCase )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
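# Invocation sketch for the conversion script above (script and file names are
# illustrative, not taken from the original source):
#   python convert_segformer_checkpoint.py \
#       --model_name segformer.b0.512x1024.city.160k \
#       --checkpoint_path ./segformer.b0.512x1024.city.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0-cityscapes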
| 39 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
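# Minimal usage sketch for the builder above (file name is illustrative): the
# packaged "pandas" loader reads pickled DataFrames back via pd.read_pickle.
df = pd.DataFrame({"text": ["a", "b"], "label": [0, 1]})
df.to_pickle("train.pkl")
ds = datasets.load_dataset("pandas", data_files={"train": "train.pkl"})["train"]
print(ds.features)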
| 170 | 0 |
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.right = None
        self.left = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise RuntimeError("unreachable: the loop above always returns")


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():  # consume one full level before printing a newline
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))

    node = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")

    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")

    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")

    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")

    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")

    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
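# Non-interactive sketch: build a tiny tree by hand (bypassing build_tree's
# input() prompts) and run two of the traversals above.
root = TreeNode(1)
root.left, root.right = TreeNode(2), TreeNode(3)
root.left.left, root.left.right = TreeNode(4), TreeNode(5)
pre_order(root)  # prints: 1,2,4,5,3,
print()
in_order(root)   # prints: 4,2,5,1,3,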
| 335 |
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self) -> None:
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image) -> None:
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self) -> None:
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self) -> None:
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
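# Equivalent vectorized sketch of the same histogram-equalization mapping using
# NumPy only (no OpenCV); `image` is assumed to be a uint8 grayscale array.
def equalize(image, levels=256):
    hist = np.bincount(image.ravel(), minlength=levels)
    cdf = hist.cumsum() / hist.sum()                    # running s_k
    lut = np.rint((levels - 1) * cdf).astype(np.uint8)  # (L - 1) * s_k, rounded
    return lut[image]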
| 335 | 1 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=__A )
lowerCamelCase : List[str] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowerCamelCase : Any = jax.random.PRNGKey(0 )
lowerCamelCase : Dict = 4
lowerCamelCase : Tuple = jax.device_count()
lowerCamelCase : List[Any] = num_samples * [prompt]
lowerCamelCase : List[str] = pipeline.prepare_inputs(__A )
# shard inputs and rng
lowerCamelCase : str = replicate(__A )
lowerCamelCase : Dict = jax.random.split(__A , __A )
lowerCamelCase : str = shard(__A )
lowerCamelCase : Union[str, Any] = pipeline(__A , __A , __A , __A , jit=__A ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3
assert np.abs(np.abs(__A , dtype=np.floataa ).sum() - 4_9947.875 ) < 5e-1
lowerCamelCase : Any = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__A ) == num_samples
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=__A )
lowerCamelCase : List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowerCamelCase : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase : List[Any] = 50
lowerCamelCase : Tuple = jax.device_count()
lowerCamelCase : List[Any] = num_samples * [prompt]
lowerCamelCase : List[str] = pipeline.prepare_inputs(__A )
# shard inputs and rng
lowerCamelCase : str = replicate(__A )
lowerCamelCase : Dict = jax.random.split(__A , __A )
lowerCamelCase : Dict = shard(__A )
lowerCamelCase : List[str] = pipeline(__A , __A , __A , __A , jit=__A ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3
assert np.abs((np.abs(__A , dtype=np.floataa ).sum() - 238_3808.2) ) < 5e-1
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=__A )
lowerCamelCase : Any = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowerCamelCase : Optional[int] = jax.random.PRNGKey(0 )
lowerCamelCase : int = 50
lowerCamelCase : str = jax.device_count()
lowerCamelCase : str = num_samples * [prompt]
lowerCamelCase : int = pipeline.prepare_inputs(__A )
# shard inputs and rng
lowerCamelCase : Tuple = replicate(__A )
lowerCamelCase : Tuple = jax.random.split(__A , __A )
lowerCamelCase : List[Any] = shard(__A )
lowerCamelCase : Optional[Any] = pipeline(__A , __A , __A , __A , jit=__A ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(__A , dtype=np.floataa ).sum() - 237_3516.75) ) < 5e-1
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa )
lowerCamelCase : List[Any] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowerCamelCase : Optional[Any] = jax.random.PRNGKey(0 )
lowerCamelCase : List[str] = 50
lowerCamelCase : Any = jax.device_count()
lowerCamelCase : str = num_samples * [prompt]
lowerCamelCase : Optional[int] = pipeline.prepare_inputs(__A )
# shard inputs and rng
lowerCamelCase : int = replicate(__A )
lowerCamelCase : int = jax.random.split(__A , __A )
lowerCamelCase : str = shard(__A )
lowerCamelCase : int = pipeline(__A , __A , __A , __A , jit=__A ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(__A , dtype=np.floataa ).sum() - 237_3516.75) ) < 5e-1
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = FlaxDDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , set_alpha_to_one=__A , steps_offset=1 , )
lowerCamelCase : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=__A , safety_checker=__A , )
lowerCamelCase : int = scheduler.create_state()
lowerCamelCase : Union[str, Any] = scheduler_state
lowerCamelCase : Optional[int] = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
lowerCamelCase : List[Any] = jax.random.PRNGKey(0 )
lowerCamelCase : str = 50
lowerCamelCase : int = jax.device_count()
lowerCamelCase : Dict = num_samples * [prompt]
lowerCamelCase : List[Any] = pipeline.prepare_inputs(__A )
# shard inputs and rng
lowerCamelCase : Any = replicate(__A )
lowerCamelCase : Union[str, Any] = jax.random.split(__A , __A )
lowerCamelCase : Any = shard(__A )
lowerCamelCase : str = pipeline(__A , __A , __A , __A , jit=__A ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3
assert np.abs((np.abs(__A , dtype=np.floataa ).sum() - 234_7693.5) ) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
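# Condensed sketch of the replicate/shard/jit recipe that the tests above
# repeat (model id and call signature follow the tests; illustrative only):
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
)
prompts = ["a photo of an astronaut"] * jax.device_count()
prompt_ids = shard(pipeline.prepare_inputs(prompts))   # one shard per device
params = replicate(params)                             # copy weights to every device
rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
images = pipeline(prompt_ids, params, rng, num_inference_steps=50, jit=True).images
# images has shape (num_devices, batch_per_device, height, width, 3)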
| 283 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)
        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __lowerCamelCase ( self : Dict ) ->List[Any]:
pass
@require_torch
def __lowerCamelCase ( self : Optional[Any] ) ->List[Any]:
lowerCamelCase__ : Optional[int] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowerCamelCase__ : List[Any] = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
] , )
lowerCamelCase__ : str = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
[
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
]
] , )
@require_torch
@slow
def __lowerCamelCase ( self : Union[str, Any] ) ->Optional[Any]:
lowerCamelCase__ : Tuple = pipeline('''zero-shot-object-detection''' )
lowerCamelCase__ : str = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
] , )
lowerCamelCase__ : List[Any] = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __lowerCamelCase ( self : int ) ->Union[str, Any]:
pass
@require_torch
@slow
def __lowerCamelCase ( self : Optional[int] ) ->Optional[int]:
lowerCamelCase__ : Optional[Any] = 0.2
lowerCamelCase__ : List[Any] = pipeline('''zero-shot-object-detection''' )
lowerCamelCase__ : Any = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=A , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
] , )
@require_torch
@slow
def __lowerCamelCase ( self : Any ) ->str:
lowerCamelCase__ : List[Any] = 2
lowerCamelCase__ : Union[str, Any] = pipeline('''zero-shot-object-detection''' )
lowerCamelCase__ : List[str] = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=A , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
] , )
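# Plain (non-test) usage of the pipeline exercised above; the default
# "zero-shot-object-detection" model is resolved by transformers, and the
# threshold/top_k arguments behave exactly as in the tests:
detector = pipeline("zero-shot-object-detection")
preds = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote", "couch"],
    threshold=0.2,
    top_k=3,
)
for p in preds:
    print(f"{p['label']}: {p['score']:.2f} at {p['box']}")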
| 142 | 0 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
)
parser.add_argument(
'--tokenizer_name',
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--checkpoint_name',
default=None,
type=str,
help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
)
parser.add_argument(
'--force_download',
action='store_true',
help='Re-download checkpoints.',
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
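# Invocation sketch for the script above (script and dump-path names are
# illustrative; tokenizer names are keys of TOKENIZER_CLASSES, e.g. "RobertaTokenizer"):
#   python convert_slow_tokenizers_to_fast.py --tokenizer_name RobertaTokenizer \
#       --checkpoint_name roberta-base --dump_path ./fast_tokenizers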
| 301 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roc_bert'] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
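# Effect of the lazy-module pattern above (a sketch): importing the package is
# cheap, and the first attribute access triggers the real submodule import.
#   import transformers.models.roc_bert as roc_bert   # no torch import yet
#   config = roc_bert.RoCBertConfig()                 # pulls in configuration_roc_bert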
| 301 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
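# ChunkPipeline flow sketch for the class above: preprocess yields one entry
# per candidate label, each becomes a separate forward pass, and postprocess
# merges and sorts the per-label detections (names follow the methods above):
#   for item in pipe.preprocess({"image": img, "candidate_labels": ["cat", "dog"]}):
#       outputs.append(pipe._forward(item))    # one model call per label
#   preds = pipe.postprocess(outputs, threshold=0.1, top_k=5)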
| 10 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bartpho'] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 130 | 0 |
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
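# Shape sketch for the projection above. The time_embed_dim / cross_attention_dim
# values are illustrative; the CLIP dims follow the constructor defaults.
proj = UnCLIPTextProjModel(
    clip_extra_context_tokens=4, clip_embeddings_dim=768,
    time_embed_dim=1536, cross_attention_dim=1280,
)
img = torch.randn(2, 768)       # CLIP image embeddings
txt = torch.randn(2, 768)       # pooled prompt embeddings
hid = torch.randn(2, 77, 768)   # text encoder hidden states
hidden, time_emb = proj(
    image_embeddings=img, prompt_embeds=txt,
    text_encoder_hidden_states=hid, do_classifier_free_guidance=False,
)
assert hidden.shape == (2, 77 + 4, 1280) and time_emb.shape == (2, 1536)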
| 41 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BridgeTowerImageProcessor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self, images, text=None, add_special_tokens=True, padding=False, truncation=None,
        max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None,
        return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False,
        return_offsets_mapping=False, return_length=False, verbose=True,
        return_tensors: Optional[Union[str, TensorType]] = None, **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length,
            verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
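# Usage sketch (checkpoint id as published on the Hub; the image path is
# illustrative):
from PIL import Image

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
batch = processor(images=Image.open("two_cats.jpg"), text="two cats lying down", return_tensors="pt")
# tokenizer keys (input_ids, attention_mask) plus pixel_values / pixel_mask from the image processor
print(sorted(batch.keys()))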
| 41 | 1 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class EncodecConfig(PretrainedConfig):
    model_type = 'encodec'

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
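# Quick sanity check of the derived properties under the 24 kHz defaults
# (the numbers follow directly from the formulas above; illustrative only):
cfg = EncodecConfig()
assert cfg.frame_rate == 75          # ceil(24000 / (8 * 5 * 4 * 2))
assert cfg.num_quantizers == 32      # int(1000 * 24.0 // (75 * 10))
assert cfg.chunk_length is None      # chunk_length_s defaults to None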
| 188 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest
    doctest.testmod()
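# Worked example: the pure-Python implementation matches hashlib on any input.
msg = b"abc"
digest = SHA1Hash(msg).final_hash()
print(digest)  # a9993e364706816aba3e25717850c26c9cd0d89d, the textbook SHA-1 of b"abc"
assert digest == hashlib.sha1(msg).hexdigest()  # noqa: S324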
| 188 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def UpperCAmelCase_ ( __lowercase : List[Any] , __lowercase : Any ) -> Optional[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_UpperCAmelCase = orig_state_dict.pop(_UpperCAmelCase )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_UpperCAmelCase = key.split("." )
_UpperCAmelCase = int(key_split[2] ), int(key_split[4] )
_UpperCAmelCase = config.vision_config.hidden_size
if "weight" in key:
_UpperCAmelCase = val[:dim, :]
_UpperCAmelCase = val[dim : dim * 2, :]
_UpperCAmelCase = val[-dim:, :]
else:
_UpperCAmelCase = val[:dim]
_UpperCAmelCase = val[dim : dim * 2]
_UpperCAmelCase = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_UpperCAmelCase = key.split("." )
_UpperCAmelCase = int(key_split[3] )
_UpperCAmelCase = config.text_config.hidden_size
if "weight" in key:
_UpperCAmelCase = val[:dim, :]
_UpperCAmelCase = val[
dim : dim * 2, :
]
_UpperCAmelCase = val[-dim:, :]
else:
_UpperCAmelCase = val[:dim]
_UpperCAmelCase = val[dim : dim * 2]
_UpperCAmelCase = val[-dim:]
else:
_UpperCAmelCase = rename_key(_UpperCAmelCase )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_UpperCAmelCase = val.squeeze_()
else:
_UpperCAmelCase = val
return orig_state_dict
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")['model']
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f'Model name {model_name} not supported.')
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1E-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
        default='''groupvit-gcc-yfcc''',
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
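# Invocation sketch (script and checkpoint names are illustrative):
#   python convert_groupvit_checkpoint.py --checkpoint_path ./groupvit_gcc_yfcc.pth \
#       --model_name groupvit-gcc-yfcc --pytorch_dump_folder_path ./groupvit --push_to_hub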
| 351 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    """Round the requested size up to a multiple of scale_factor**2 pixels and return the latent-aligned size."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
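
# Worked example (illustrative): with the default scale_factor=8 the rounding unit
# is 8**2 = 64, so downscale_height_and_width(768, 768) returns (96, 96), the latent
# resolution for a 768x768 output (768 // 64 = 12, 12 * 8 = 96). A non-multiple such
# as 700 rounds up: 700 // 64 = 10 with a remainder, hence 11 * 8 = 88.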
class KandinskyV22Pipeline(DiffusionPipeline):
    """Decoder pipeline for text-to-image generation with Kandinsky 2.2."""

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq)
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
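
    # Note: scaling by init_noise_sigma keeps prepare_latents scheduler-agnostic;
    # for the DDPMScheduler used here it is 1.0, so the multiplication is a no-op.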
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload submodules to CPU, moving each to the GPU only for its forward pass."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole models to CPU, moving one full model at a time to the GPU."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
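
    # Usage sketch: calling pipe.enable_model_cpu_offload() instead of pipe.to("cuda")
    # trades a little latency for a much lower peak VRAM, since only one submodule
    # lives on the GPU at a time.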
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Return the device the pipeline actually runs on, honoring accelerate hooks."""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latents
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator)[0]

        # post-processing: decode latents with the MoVQ VAE
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
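
# Shape walk-through for the guidance block above (illustrative): with CFG on and a
# learned-variance scheduler, the UNet output has 2 * latent_channels channels on a
# doubled batch. split(latents.shape[1], dim=1) separates the noise prediction from
# the variance logits, chunk(2) separates the unconditional and text-conditioned
# batch halves, and the guided prediction is re-concatenated with the conditioned
# variance logits so that DDPMScheduler.step can consume both.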
| 156 | 0 |