| code (string, 86–54.5k chars) | code_codestyle (int64, 0–371) | style_context (string, 87–49.2k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random channels-first uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 342 |
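For orientation while reading the fixture above: `CLIPProcessor` is a thin wrapper that routes `text=` to a `CLIPTokenizer` and `images=` to a `CLIPImageProcessor`, merging both outputs into one `BatchEncoding`. A minimal usage sketch, using the public `openai/clip-vit-base-patch32` checkpoint for illustration (the image path is hypothetical):

```python
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.open("cat.png")  # hypothetical local file

# text goes to the tokenizer, images to the image processor;
# the results are merged into a single BatchEncoding
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']
```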
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    """Count the reversible numbers of the given length (Project Euler 145 helper)."""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return the count of reversible numbers below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
| 342 | 1 |
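As a sanity check on the digit-DP above, a brute-force count must agree with `solution` for small lengths; Project Euler 145 states there are exactly 120 reversible numbers below one thousand. A sketch:

```python
def reversible_brute_force(max_power: int) -> int:
    """O(10**max_power) cross-check: n is reversible when n + reverse(n) has only odd digits."""
    count = 0
    for n in range(1, 10**max_power):
        if n % 10 == 0:  # reverse(n) would have a leading zero
            continue
        total = n + int(str(n)[::-1])
        if all(int(d) % 2 == 1 for d in str(total)):
            count += 1
    return count


assert reversible_brute_force(3) == solution(3) == 120
```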
"""simple docstring"""
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """
    Return the numerator of the fraction immediately to the left of
    numerator/denominator among fractions with denominators up to `limit`
    (Project Euler 71).
    """
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
| 360 |
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: find the longest palindromic substring in linear time.

    >>> palindromic_string('abbbaba')
    'abbba'
    >>> palindromic_string('ababa')
    'ababa'
    """
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_input_string for range(0, length - 1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append the last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of the previous furthest-ending
    # palindromic substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of the palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_input_string find the corresponding palindrome
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # build the answer string, dropping the "|" separators
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 85 | 0 |
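The doctest values above can also be validated against a quadratic reference; a small cross-check sketch for the linear-time version:

```python
def longest_palindrome_brute_force(s: str) -> str:
    """Cubic-time reference used only to validate palindromic_string."""
    best = ""
    for i in range(len(s)):
        for j in range(i, len(s)):
            sub = s[i : j + 1]
            if sub == sub[::-1] and len(sub) > len(best):
                best = sub
    return best


for word in ["abbbaba", "ababa", "zzbaabz"]:
    assert palindromic_string(word) == longest_palindrome_brute_force(word)
```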
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # the fused qkv matrix is split back into separate query/key/value tensors
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 54 |
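The name parsing in `get_swin_config` drives every other setting, so it is worth spelling out. Note that `int(name_split[3][-1])` only works for single-digit window sizes (e.g. `window7`), which holds for the checkpoints this script targets:

```python
name = "swin_tiny_patch4_window7_224"
parts = name.split("_")
print(parts[1])           # "tiny" -> selects the embed_dim/depths/num_heads preset
print(int(parts[3][-1]))  # 7      -> window_size (last character of "window7")
print(int(parts[4]))      # 224    -> image size
```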
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase_ = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 266 | 0 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def ta_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def snake_case_ ( self , a__ , a__=False , a__=20 , a__=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
_lowerCamelCase = []
for i in range(len(a__ ) ):
try:
_lowerCamelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=a__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
_lowerCamelCase = list(filter(lambda a__ : re.match(r'^[ a-zA-Z]+$' , t[1] ) , a__ ) )
_lowerCamelCase = list(filter(lambda a__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=a__ ) , a__ ) )
if max_length is not None and len(a__ ) > max_length:
_lowerCamelCase = toks[:max_length]
if min_length is not None and len(a__ ) < min_length and len(a__ ) > 0:
while len(a__ ) < min_length:
_lowerCamelCase = toks + toks
# toks_str = [t[1] for t in toks]
_lowerCamelCase = [t[0] for t in toks]
# Ensure consistency
_lowerCamelCase = tokenizer.decode(a__ , clean_up_tokenization_spaces=a__ )
if " " not in output_txt and len(a__ ) > 1:
_lowerCamelCase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=a__ )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=a__ )
)
if with_prefix_space:
_lowerCamelCase = ' ' + output_txt
_lowerCamelCase = tokenizer.encode(a__ , add_special_tokens=a__ )
return output_txt, output_ids
def snake_case_ ( self ):
_lowerCamelCase = self.ta_base_tokenizer
_lowerCamelCase = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
_lowerCamelCase = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def snake_case_ ( self ):
_lowerCamelCase = self.ta_base_tokenizer
_lowerCamelCase = 'Unicode €.'
_lowerCamelCase = tokenizer(a__ )
_lowerCamelCase = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
self.assertEqual(encoded['input_ids'] , a__ )
# decoding
_lowerCamelCase = tokenizer.decode(a__ )
self.assertEqual(a__ , 'Unicode €.</s>' )
_lowerCamelCase = tokenizer('e è é ê ë' )
_lowerCamelCase = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded['input_ids'] , a__ )
# decoding
_lowerCamelCase = tokenizer.decode(a__ )
self.assertEqual(a__ , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def snake_case_ ( self ):
_lowerCamelCase = self.ta_base_tokenizer
_lowerCamelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
_lowerCamelCase = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
_lowerCamelCase = tokenizer(a__ , padding=a__ , return_tensors=a__ )
self.assertIsInstance(a__ , a__ )
if FRAMEWORK != "jax":
_lowerCamelCase = list(batch.input_ids.numpy()[0] )
else:
_lowerCamelCase = list(batch.input_ids.tolist()[0] )
self.assertListEqual(a__ , a__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def snake_case_ ( self ):
_lowerCamelCase = self.ta_base_tokenizer
_lowerCamelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_lowerCamelCase = tokenizer(a__ , padding=a__ , return_tensors=a__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , a__ )
self.assertIn('attention_mask' , a__ )
self.assertNotIn('decoder_input_ids' , a__ )
self.assertNotIn('decoder_attention_mask' , a__ )
def snake_case_ ( self ):
_lowerCamelCase = self.ta_base_tokenizer
_lowerCamelCase = [
'Summary of the text.',
'Another summary.',
]
_lowerCamelCase = tokenizer(
text_target=a__ , max_length=32 , padding='max_length' , truncation=a__ , return_tensors=a__ )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def snake_case_ ( self ):
_lowerCamelCase = self.ta_base_tokenizer
_lowerCamelCase = ['A long paragraph for summarization. </s>']
_lowerCamelCase = ['Summary of the text. </s>']
# fmt: off
_lowerCamelCase = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
_lowerCamelCase = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
_lowerCamelCase = tokenizer(a__ , text_target=a__ )
self.assertEqual(a__ , batch['input_ids'][0] )
self.assertEqual(a__ , batch['labels'][0] )
def snake_case_ ( self ):
# safety check on max_len default value so we are sure the test works
_lowerCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_lowerCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = ' He is very happy, UNwant\u00E9d,running'
_lowerCamelCase = tokenizer.encode(a__ , add_special_tokens=a__ )
tokenizer.save_pretrained(a__ )
_lowerCamelCase = tokenizer.__class__.from_pretrained(a__ )
_lowerCamelCase = after_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
shutil.rmtree(a__ )
_lowerCamelCase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
_lowerCamelCase = tempfile.mkdtemp()
_lowerCamelCase = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
_lowerCamelCase = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_lowerCamelCase = tokenizer.encode(a__ , add_special_tokens=a__ )
tokenizer.save_pretrained(a__ )
_lowerCamelCase = tokenizer.__class__.from_pretrained(a__ )
_lowerCamelCase = after_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_lowerCamelCase = tokenizer.__class__.from_pretrained(a__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(a__ )
def snake_case_ ( self ):
_lowerCamelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a__ )
with open(os.path.join(a__ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
_lowerCamelCase = json.load(a__ )
with open(os.path.join(a__ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
_lowerCamelCase = json.load(a__ )
_lowerCamelCase = [F'<extra_id_{i}>' for i in range(1_25 )]
_lowerCamelCase = added_tokens_extra_ids + [
'an_additional_special_token'
]
_lowerCamelCase = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(a__ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(a__ , a__ )
with open(os.path.join(a__ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(a__ , a__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_lowerCamelCase = tokenizer_class.from_pretrained(
a__ , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_lowerCamelCase = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=a__ )]
_lowerCamelCase = tokenizer_class.from_pretrained(
a__ , additional_special_tokens=a__ , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def snake_case_ ( self ):
_lowerCamelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a__ )
_lowerCamelCase = tokenizer_class.from_pretrained(a__ )
self.assertTrue(tokenizer.decode([2_55] ) == '' )
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
pass
def snake_case_ ( self ):
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
_lowerCamelCase = self.get_tokenizers(fast=a__ , do_lower_case=a__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
_lowerCamelCase = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
_lowerCamelCase = tokenizer.convert_tokens_to_string(a__ )
self.assertIsInstance(a__ , a__ )
def snake_case_ ( self ):
_lowerCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
_lowerCamelCase = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
_lowerCamelCase = 0
_lowerCamelCase = tokenizer.convert_ids_to_tokens(
a__ , skip_special_tokens=a__ )
for attr in attributes_list:
setattr(a__ , attr + '_id' , a__ )
self.assertEqual(getattr(a__ , a__ ) , a__ )
self.assertEqual(getattr(a__ , attr + '_id' ) , a__ )
setattr(a__ , attr + '_id' , a__ )
self.assertEqual(getattr(a__ , a__ ) , a__ )
self.assertEqual(getattr(a__ , attr + '_id' ) , a__ )
setattr(a__ , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(a__ , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(a__ , 'additional_special_tokens_ids' ) , [] )
setattr(a__ , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(a__ , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(a__ , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
| 80 |
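The magic numbers in the expected `input_ids` above are just UTF-8 bytes shifted by ByT5's three leading special tokens (`<pad>`=0, `</s>`=1, `<unk>`=2), with `</s>` appended. A quick check that needs no tokenizer:

```python
text = "Unicode €."
expected = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]

# each UTF-8 byte is offset by the 3 special tokens, then </s> (id 1) is appended
ids = [b + 3 for b in text.encode("utf-8")] + [1]
assert ids == expected
```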
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 80 | 1 |
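The `replicate`/`shard` pair above is the standard pattern for `pmap`-style data parallelism: parameters are copied to every device while inputs get a leading device axis. Roughly what `flax.training.common_utils.shard` does, sketched in plain NumPy:

```python
import numpy as np


def shard_like(x: np.ndarray, num_devices: int) -> np.ndarray:
    """Reshape the batch axis to (num_devices, batch_per_device, ...)."""
    assert x.shape[0] % num_devices == 0
    return x.reshape((num_devices, x.shape[0] // num_devices) + x.shape[1:])


batch = np.zeros((8, 77))          # e.g. 8 prompts of 77 token ids each
print(shard_like(batch, 4).shape)  # (4, 2, 77): one slice per device
```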
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """
    Return the probability of `successes` successes in `trials` independent
    Bernoulli trials, each succeeding with probability `prob`.
    """
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
| 184 |
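A worked instance of the closed form, matching the `__main__` example:

```python
# P(X = 2) with n = 4 trials and p = 0.75:
# C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375
assert abs(binomial_distribution(2, 4, 0.75) - 0.2109375) < 1e-12
```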
def prefix_function(input_string: str) -> list:
    """
    Knuth-Morris-Pratt prefix function: for every position i, the length of
    the longest proper prefix of input_string[: i + 1] that is also its suffix.
    """
    prefix_result = [0] * len(input_string)

    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]

        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j

    return prefix_result


def longest_prefix(input_string: str) -> int:
    """Return the length of the longest prefix that is also a suffix of some prefix."""
    return max(prefix_function(input_string))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 274 | 0 |
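A small worked example of the table, since the definition is easy to misread:

```python
# for "aabcdaabc" the trailing "aabc" matches the leading "aabc"
assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
assert longest_prefix("aabcdaabc") == 4
```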
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCamelCase_ :
def __init__( self , A , A=2 , A=3 , A=4 , A=2 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=36 , A=3 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=6 , A=6 , A=3 , A=4 , A=None , A=1000 , ) -> Union[str, Any]:
UpperCAmelCase : List[str] = parent
UpperCAmelCase : Tuple = batch_size
UpperCAmelCase : Optional[int] = num_channels
UpperCAmelCase : Tuple = image_size
UpperCAmelCase : Dict = patch_size
UpperCAmelCase : Any = text_seq_length
UpperCAmelCase : List[Any] = is_training
UpperCAmelCase : Optional[int] = use_input_mask
UpperCAmelCase : Tuple = use_token_type_ids
UpperCAmelCase : Tuple = use_labels
UpperCAmelCase : Dict = vocab_size
UpperCAmelCase : List[Any] = hidden_size
UpperCAmelCase : List[str] = num_hidden_layers
UpperCAmelCase : Any = num_attention_heads
UpperCAmelCase : Any = intermediate_size
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Optional[Any] = hidden_dropout_prob
UpperCAmelCase : List[Any] = attention_probs_dropout_prob
UpperCAmelCase : List[Any] = max_position_embeddings
UpperCAmelCase : List[Any] = type_vocab_size
UpperCAmelCase : Optional[int] = type_sequence_label_size
UpperCAmelCase : Union[str, Any] = initializer_range
UpperCAmelCase : Dict = coordinate_size
UpperCAmelCase : Optional[int] = shape_size
UpperCAmelCase : Tuple = num_labels
UpperCAmelCase : Tuple = num_choices
UpperCAmelCase : Optional[Any] = scope
UpperCAmelCase : Union[str, Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCAmelCase : Any = text_seq_length
UpperCAmelCase : Optional[int] = (image_size // patch_size) ** 2 + 1
UpperCAmelCase : str = self.text_seq_length + self.image_seq_length
def _lowercase( self ) -> Any:
UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCAmelCase : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase : Optional[Any] = bbox[i, j, 3]
UpperCAmelCase : int = bbox[i, j, 1]
UpperCAmelCase : Any = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase : List[str] = bbox[i, j, 2]
UpperCAmelCase : List[str] = bbox[i, j, 0]
UpperCAmelCase : int = t
UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] = None
if self.use_input_mask:
UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCAmelCase : Any = None
if self.use_token_type_ids:
UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCAmelCase : Tuple = None
UpperCAmelCase : Optional[Any] = None
if self.use_labels:
UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCAmelCase : Any = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _lowercase( self , A , A , A , A , A , A , A , A ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = LayoutLMvaModel(config=A )
model.to(A )
model.eval()
# text + image
UpperCAmelCase : Dict = model(A , pixel_values=A )
UpperCAmelCase : List[Any] = model(
A , bbox=A , pixel_values=A , attention_mask=A , token_type_ids=A )
UpperCAmelCase : int = model(A , bbox=A , pixel_values=A , token_type_ids=A )
UpperCAmelCase : List[Any] = model(A , bbox=A , pixel_values=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCAmelCase : Optional[Any] = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCAmelCase : Optional[int] = model(pixel_values=A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _lowercase( self , A , A , A , A , A , A , A , A ) -> Dict:
UpperCAmelCase : Any = self.num_labels
UpperCAmelCase : List[str] = LayoutLMvaForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase : Dict = model(
A , bbox=A , pixel_values=A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase( self , A , A , A , A , A , A , A , A ) -> Optional[Any]:
UpperCAmelCase : Any = self.num_labels
UpperCAmelCase : Optional[Any] = LayoutLMvaForTokenClassification(config=A )
model.to(A )
model.eval()
UpperCAmelCase : int = model(
A , bbox=A , pixel_values=A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _lowercase( self , A , A , A , A , A , A , A , A ) -> Any:
UpperCAmelCase : Optional[Any] = LayoutLMvaForQuestionAnswering(config=A )
model.to(A )
model.eval()
UpperCAmelCase : List[str] = model(
A , bbox=A , pixel_values=A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = False
lowercase = False
lowercase = False
lowercase = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase = (
{'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _lowercase( self , A , A , A , A , A ) -> str:
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Any = LayoutLMvaModelTester(self )
UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 )
def _lowercase( self , A , A , A=False ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = copy.deepcopy(A )
if model_class in get_values(A ):
UpperCAmelCase : str = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(A , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(A ):
UpperCAmelCase : Any = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=A )
elif model_class in get_values(A ):
UpperCAmelCase : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
UpperCAmelCase : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
elif model_class in [
*get_values(A ),
]:
UpperCAmelCase : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A )
elif model_class in [
*get_values(A ),
]:
UpperCAmelCase : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=A , )
return inputs_dict
def _lowercase( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> Any:
UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase : List[Any] = type
self.model_tester.create_and_check_model(*A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def _lowercase( self ) -> int:
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
@slow
def _lowercase( self ) -> int:
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : List[str] = LayoutLMvaModel.from_pretrained(A )
self.assertIsNotNone(A )
def __lowerCamelCase ( ) -> int:
UpperCAmelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
@cached_property
def _lowercase( self ) -> List[Any]:
return LayoutLMvaImageProcessor(apply_ocr=A ) if is_vision_available() else None
@slow
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Union[str, Any] = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(A )
UpperCAmelCase : Dict = self.default_image_processor
UpperCAmelCase : str = prepare_img()
UpperCAmelCase : List[str] = image_processor(images=A , return_tensors="""pt""" ).pixel_values.to(A )
UpperCAmelCase : Tuple = torch.tensor([[1, 2]] )
UpperCAmelCase : Union[str, Any] = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
UpperCAmelCase : Dict = model(
input_ids=input_ids.to(A ) , bbox=bbox.to(A ) , pixel_values=pixel_values.to(A ) , )
# verify the logits
UpperCAmelCase : List[Any] = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , A )
UpperCAmelCase : Tuple = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(A )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , A , atol=1e-4 ) )
| 338 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 338 | 1 |
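The K, V, Q ordering noted in the comment above is the whole subtlety of the conversion; `torch.split` just slices the fused matrix back into equal blocks along dim 0. A toy demonstration:

```python
import torch

# a fused projection stacking three (hidden, hidden) matrices along dim 0
hidden = 4
fused = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)

# torch.split returns the blocks in storage order, which for metaseq's
# sequence-parallel blocks is K, V, Q rather than Q, K, V
k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)
assert torch.equal(torch.cat([k, v, q], dim=0), fused)
```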
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    """
    Summarization tool built on a BART checkpoint fine-tuned on SAMSum.
    """

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
| 62 |
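For context, `PipelineTool` subclasses are meant to be called like functions, running encode, forward, and decode in sequence. A hedged usage sketch (assuming the tool is instantiable with defaults, which downloads the checkpoint on first use; the sample text is invented):

```python
summarizer = TextSummarizationTool()
summary = summarizer(
    "Transformers provides thousands of pretrained models to perform tasks "
    "on different modalities such as text, vision, and audio. ..."
)
print(summary)
```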
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 62 | 1 |
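The `_LazyModule` indirection at the bottom is what keeps `import transformers` cheap: submodules are only imported when one of their attributes is first accessed. A stripped-down sketch of the idea (a toy stand-in, not the real class):

```python
import importlib
import types


class ToyLazyModule(types.ModuleType):
    """Resolve attributes to their defining submodule on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # maps attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)
```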
"""simple docstring"""
from __future__ import annotations
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ):
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 241 |
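A worked instance of the mass-action law (`n * p = n_i**2`) the function encodes:

```python
# with n = 25 and p = 100, the intrinsic concentration is sqrt(25 * 100) = 50
assert carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0) == (
    "intrinsic_conc",
    50.0,
)
```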
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self) -> None:
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained(self) -> None:
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls) -> None:
        cls.tokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self) -> None:
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
    def test_enro_tokenizer_batch_encode_plus(self) -> None:
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self) -> None:
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self) -> None:
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self) -> None:
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])
    def test_special_tokens_unaffected_by_save_load(self) -> None:
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self) -> None:
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
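    # Illustrative sketch (an addition, not part of the original test): mBART's
    # shift_tokens_right rotates the final language-code token to the front of
    # each label sequence, so "[tokens..., eos, lang]" becomes decoder inputs
    # "[lang, tokens..., eos]". A simplified, single-sequence version:
    #
    #     def shift_right(labels: list) -> list:
    #         return [labels[-1]] + labels[:-1]
    #
    # The real implementation additionally has to skip trailing padding before
    # picking the language code, which is why it takes pad_token_id.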
    @require_torch
    def test_enro_tokenizer_prepare_batch(self) -> None:
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
    def test_seq2seq_max_length(self) -> None:
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self) -> None:
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
| 241 | 1 |
"""Conway's Game of Life on a randomly seeded square canvas, drawn live with matplotlib."""
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    """Create an empty (all-dead) size x size canvas."""
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    """Randomly set each cell of the canvas to alive or dead, in place."""
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Compute and return the next generation of the canvas."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    """Apply Conway's B3/S23 rules to one cell, given its 3x3 neighbourhood."""
    dead = 0
    alive = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False  # underpopulation
        elif alive == 2 or alive == 3:
            state = True  # survival
        elif alive > 3:
            state = False  # overpopulation
    else:
        if alive == 3:
            state = True  # reproduction
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
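# Quick illustrative check (an addition, not part of the original script): a 3-cell
# "blinker" flips between a horizontal and a vertical bar each generation, which
# exercises the B3/S23 rules implemented in __judge_point above.
#
#     blinker = [[False] * 5 for _ in range(5)]
#     for j in (1, 2, 3):
#         blinker[2][j] = True
#     nxt = run(blinker)
#     assert nxt[1][2] and nxt[2][2] and nxt[3][2]  # now vertical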
| 3 |
"""Tests for FlaxAutoModel checkpoint loading and error reporting."""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
| 3 | 1 |
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
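# Subtracting the row-wise max before exponentiating is the usual numerical-stability
# trick: softmax(x) == softmax(x - max(x)) exactly, but the shifted version cannot
# overflow np.exp for large logits.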
class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 197 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
        pipe_loaded.to(torch_device)
        pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 197 | 1 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
UpperCAmelCase__ = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
UpperCAmelCase__ = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
UpperCAmelCase__ = R"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH (Mathematics Aptitude Test of Heuristics) dataset."""

    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def _compute(self, predictions, references):
        """Returns the accuracy after canonicalizing both predictions and references."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)

        return {
            "accuracy": accuracy,
        }
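# Example (mirrors the doctest embedded in _KWARGS_DESCRIPTION above):
#     metric = datasets.load_metric("competition_math")
#     metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
#     # -> {'accuracy': 1.0}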
| 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
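# Minimal illustration of the from_generator pattern used above (an added sketch, not
# part of the original script): each element is a (features-dict, label) pair whose
# dtypes and shapes must be declared up front, since the generator is opaque to TF.
#
#     def gen():
#         yield ({"input_ids": [101, 2023, 102]}, 1)
#
#     ds = tf.data.Dataset.from_generator(
#         gen,
#         ({"input_ids": tf.int64}, tf.int64),
#         ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
#     )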
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
f'''16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
            results.update(result)
return results
if __name__ == "__main__":
main()
| 201 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
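# check_same_shape is used by the Karras-scheduler test below to assert that swapping
# schedulers never changes the shape of the pipeline output.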
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
__lowerCamelCase = True
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=True,  # assumption: this flag's value was mangled in the source
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no-sigma schedulers are not supported
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2
    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        upscaled_image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
| 201 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
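        # i.e. a single sequence is wrapped as `<s> A </s>` and a pair as
        # `<s> A </s></s> B </s>`, the same layout BART/RoBERTa-style models use.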
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of sub-word tokens into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
| 165 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
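# Each down block returns every intermediate hidden state in `output_states`; the UNet
# keeps these as skip connections, and the up blocks below pop them back off
# (`res_hidden_states_tuple[-1]`) to concatenate with the upsampled features.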
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUpBlockaD(nn.Module ):
    in_channels : int
    out_channels : int
    prev_output_channel : int
    dropout : float = 0.0
    num_layers : int = 1
    add_upsample : bool = True
    dtype : jnp.dtype = jnp.float32
    def setup( self ) -> List[Any]:
        resnets = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , res_hidden_states_tuple , temb , deterministic=True ) -> Dict:
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states )
        return hidden_states
class FlaxUNetMidBlockaDCrossAttn(nn.Module ):
    in_channels : int
    dropout : float = 0.0
    num_layers : int = 1
    num_attention_heads : int = 1
    use_linear_projection : bool = False
    use_memory_efficient_attention : bool = False
    dtype : jnp.dtype = jnp.float32
    def setup( self ) -> Union[str, Any]:
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
        ]
        attentions = []
        for _ in range(self.num_layers ):
            attn_block = FlaxTransformeraDModel(
                in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
            res_block = FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        self.attentions = attentions
    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ) -> Tuple:
        hidden_states = self.resnets[0](hidden_states , temb )
        for attn, resnet in zip(self.attentions , self.resnets[1:] ):
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        return hidden_states
| 165 | 1 |
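# A minimal sketch (not from the original file) of the skip-connection
# bookkeeping the blocks above implement: down blocks accumulate their outputs
# into a tuple, and up blocks pop that tuple in reverse and concatenate along
# the channel axis (NHWC in Flax, hence axis=-1). The arithmetic stand-ins
# below replace the real resnet/attention modules.
import jax.numpy as jnp

def unet_skip_demo(hidden_states):
    skips = (hidden_states,)
    for _ in range(2):                       # stand-in for down blocks
        hidden_states = hidden_states * 0.5  # stand-in for resnet/attention
        skips += (hidden_states,)
    for _ in range(2):                       # stand-in for up blocks
        res = skips[-1]
        skips = skips[:-1]
        hidden_states = jnp.concatenate((hidden_states, res), axis=-1)
        hidden_states = hidden_states.mean(axis=-1, keepdims=True)  # stand-in for resnet
    return hidden_states

print(unet_skip_demo(jnp.ones((1, 8, 8, 4))).shape)  # (1, 8, 8, 1)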
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester :
    """simple docstring"""
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates ,)
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFBlenderbotModel(config=config ).get_decoder()
        input_ids = inputs_dict["""input_ids"""]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["""attention_mask"""][:1, :]
        head_mask = inputs_dict["""head_mask"""]
        _lowercase : Dict = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1E-3 )
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFBlenderbotModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BlenderbotConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests( unittest.TestCase ):
    """simple docstring"""
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"
    @cached_property
    def tokenizer( self ):
        return BlenderbotTokenizer.from_pretrained(self.model_name )
    @cached_property
    def model( self ):
        model = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    @slow
    def test_generation_from_long_input( self ):
        model_inputs = self.tokenizer(self.src_text , return_tensors="""tf""" )
        generated_ids = self.model.generate(
            model_inputs.input_ids , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 336 |
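# A distilled version of the cache-consistency check above, written against a
# hypothetical Hugging Face-style TF decoder (names and interface here are
# illustrative, not part of the original test): decode the full sequence in one
# pass, then decode only the new tokens with past_key_values, and require
# near-identical outputs for the overlapping positions.
import tensorflow as tf

def check_cache_consistency(model, input_ids, next_tokens, attention_mask):
    full_output = model(tf.concat([input_ids, next_tokens], axis=-1),
                        attention_mask=attention_mask)[0]
    past_key_values = model(input_ids, use_cache=True).past_key_values
    cached_output = model(next_tokens, attention_mask=attention_mask,
                          past_key_values=past_key_values)[0]
    tf.debugging.assert_near(
        full_output[:, -next_tokens.shape[1] :], cached_output, rtol=1e-3
    )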
"""simple docstring"""
import cv2
import numpy as np
class HarrisCorner :
    """simple docstring"""
    def __init__( self , k , window_size ):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("""invalid k value""" )
    def __str__( self ):
        return str(self.k )
    def detect( self , img_path ):
        img = cv2.imread(img_path , 0 )
        h , w = img.shape
        corner_list : list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img , cv2.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 2_55 )
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , corner_list = edge_detect.detect("""path_to_image""")
    cv2.imwrite("""detect.png""", color_img)
| 336 | 1 |
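# An equivalent vectorized corner response (a sketch, not part of the original
# sample): the windowed sums in the double loop above are box filters, so the
# whole response map can be computed at once with cv2.filter2D.
import cv2
import numpy as np

def harris_response(img: np.ndarray, window_size: int = 3, k: float = 0.04) -> np.ndarray:
    dy, dx = np.gradient(img.astype(np.float64))
    ixx, iyy, ixy = dx * dx, dy * dy, dx * dy
    kernel = np.ones((window_size, window_size))
    wxx = cv2.filter2D(ixx, -1, kernel)  # windowed sum of Ix^2
    wyy = cv2.filter2D(iyy, -1, kernel)  # windowed sum of Iy^2
    wxy = cv2.filter2D(ixy, -1, kernel)  # windowed sum of Ix*Iy
    det = wxx * wyy - wxy**2
    trace = wxx + wyy
    return det - k * trace**2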
'''simple docstring'''
def gcd( a , b ) -> int:
    while a != 0:
        a , b = b % a, a
    return b
def find_mod_inverse( a , m ) -> int:
    if gcd(a , m ) != 1:
        msg = f"""mod inverse of {a!r} and {m!r} does not exist"""
        raise ValueError(msg )
    u1 , u2 , u3 = 1, 0, a
    v1 , v2 , v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1 , v2 , v3 , u1 , u2 , u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
| 89 |
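# Quick sanity check for the two functions above: a modular inverse satisfies
# (a * a_inv) % m == 1 whenever gcd(a, m) == 1.
a, m = 7, 26
inv = find_mod_inverse(a, m)
assert (a * inv) % m == 1
print(inv)  # 15, since 7 * 15 = 105 = 4 * 26 + 1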
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp( self ) -> str:
        '''simple docstring'''
        super().setUp()
        vocab_tokens = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
@tooslow
    def test_pre_tokenization( self ) -> Tuple:
        '''simple docstring'''
        tokenizer = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
        texts = '今天天气真好!'
        jieba_tokens = ['今天', '天气', '真', '好', '!']
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens , jieba_tokens )
        normalized_text = '今天天气真好!'
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9_802, 14_962, 2_082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_jieba_tokens )
        reconstructed_text = tokenizer.decode(input_jieba_tokens )
        self.assertEqual(reconstructed_text , normalized_text )
| 196 | 0 |
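# The vocab round-trip the test above exercises, in miniature: token<->id maps
# are just inverse dictionaries built from the vocab file order.
vocab = ["<unk>", "我", "是"]
token_to_id = {tok: i for i, tok in enumerate(vocab)}
id_to_token = {i: tok for tok, i in token_to_id.items()}
print([token_to_id[t] for t in ["我", "是"]])  # [1, 2]
print([id_to_token[i] for i in [1, 2]])        # ['我', '是']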
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class UpperCamelCase_ (ctypes.Structure ):
"""simple docstring"""
_lowerCAmelCase = [('size', ctypes.c_int), ('visible', ctypes.c_byte)]
def hide_cursor() -> Tuple:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-1_1 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('''\033[?25l''' )
        sys.stdout.flush()
def show_cursor() -> int:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-1_1 )
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle , ctypes.byref(ci ) )
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle , ctypes.byref(ci ) )
    elif os.name == "posix":
        sys.stdout.write('''\033[?25h''' )
        sys.stdout.flush()
@contextmanager
def hide() -> Dict:
try:
hide_cursor()
yield
finally:
show_cursor()
| 365 |
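# Hypothetical usage of the hide() context manager defined above: the cursor
# stays hidden while the spinner runs and is restored even if an exception is
# raised inside the block.
import time

with hide():
    for frame in "|/-\\" * 5:
        print(f"\rworking {frame}", end="", flush=True)
        time.sleep(0.05)
print("\rdone      ")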
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[8, 16, 32, 64] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , num_groups=1 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        return BitConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = BitModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = BitForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        """simple docstring"""
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = BitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BitConfig , has_text_modality=False )
    def test_config( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
    def test_attention_outputs( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
"""simple docstring"""
pass
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_initialization( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
    def test_hidden_states_output( self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''preactivation''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
    def test_feed_forward_chunking( self ):
"""simple docstring"""
pass
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img() -> Optional[int]:
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )
)
@slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = BitModelTester(self )
| 4 | 0 |
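# A small, runnable look at the out_features bookkeeping the backbone test
# relies on. The printed values are what I expect from the config defaults,
# stated here as an assumption rather than a guarantee.
from transformers import BitConfig

config = BitConfig(out_features=["stage2", "stage3", "stage4"])
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage2', 'stage3', 'stage4']
print(config.out_indices)   # [2, 3, 4]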
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings( idx ) -> List[str]:
    """simple docstring"""
    embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention( idx , cnt ) -> Optional[int]:
    """simple docstring"""
    attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token( idx ) -> Any:
    """simple docstring"""
    token = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", 'stage2.cls_token') )
return token
def final() -> Union[str, Any]:
    """simple docstring"""
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint( cvt_model , image_size , cvt_file_name , pytorch_dump_folder_path ) -> Any:
    """simple docstring"""
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 1_000
    repo_id = 'huggingface/label-files'
    num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type='dataset' ) ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    id2label = id2label
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1_024]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device('cpu' ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 28 |
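# The conversion script above is, at its core, a key-renaming pass over a
# state dict. A generic miniature of that pattern (the mapping pair below is
# made up for illustration):
from collections import OrderedDict
import torch

def remap_state_dict(old_sd, rename_pairs):
    new_sd = OrderedDict()
    for new_key, old_key in rename_pairs:
        new_sd[new_key] = old_sd[old_key]
    return new_sd

old = {"stage0.patch_embed.proj.weight": torch.zeros(2, 2)}
pairs = [("cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
          "stage0.patch_embed.proj.weight")]
print(list(remap_state_dict(old, pairs)))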
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass(frozen=True )
class InputExample:
    """simple docstring"""
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True )
class InputFeatures:
    """simple docstring"""
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset ):
        """simple docstring"""
        features: List[InputFeatures]
        def __init__( self , data_dir , tokenizer , task , max_seq_length=None , overwrite_cache=False , evaluate=False , ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir , "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(max_seq_length ) , task , ) , )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1] , label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path ):
                if os.path.exists(cached_features_file ) and not overwrite_cache:
                    logger.info(f"""Loading features from cached file {cached_features_file}""" )
                    self.features = torch.load(cached_features_file )
                else:
                    logger.info(f"""Creating features from dataset file at {data_dir}""" )
                    examples = (
                        processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
                    )
                    logger.info("Training examples: %s" , len(examples ) )
                    self.features = hans_convert_examples_to_features(examples , label_list , max_seq_length , tokenizer )
                    logger.info("Saving features into cached file %s" , cached_features_file )
                    torch.save(self.features , cached_features_file )
        def __len__( self ):
            return len(self.features )
        def __getitem__( self , i ):
            return self.features[i]
        def get_labels( self ):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        """simple docstring"""
        features: List[InputFeatures]
        def __init__( self , data_dir , tokenizer , task , max_seq_length=128 , overwrite_cache=False , evaluate=False , ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1] , label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir ) if evaluate else processor.get_train_examples(data_dir )
            self.features = hans_convert_examples_to_features(examples , self.label_list , max_seq_length , tokenizer )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
if ex_index % 10_000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(SCREAMING_SNAKE_CASE )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
            self.dataset = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE , (
{
"example_id": tf.intaa,
"input_ids": tf.intaa,
"attention_mask": tf.intaa,
"token_type_ids": tf.intaa,
},
tf.intaa,
) , (
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
        def get_dataset( self ):
            return self.dataset
        def __len__( self ):
            return len(self.features )
        def __getitem__( self , i ):
            return self.features[i]
        def get_labels( self ):
            return self.label_list
class HansProcessor(DataProcessor ):
    """simple docstring"""
    def get_train_examples( self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , "heuristics_train_set.txt" ) ) , "train" )
    def get_dev_examples( self , data_dir ):
        return self._create_examples(self._read_tsv(os.path.join(data_dir , "heuristics_evaluation_set.txt" ) ) , "dev" )
    def get_labels( self ):
        return ["contradiction", "entailment", "neutral"]
    def _create_examples( self , lines , set_type ):
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex" ) else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
        return examples
def hans_convert_examples_to_features( examples , label_list , max_length , tokenizer , ):
    """simple docstring"""
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc="convert examples to features" ):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding="max_length" , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
    for i, example in enumerate(examples[:5] ):
        logger.info("*** Example ***" )
        logger.info(f"""guid: {example}""" )
        logger.info(f"""features: {features[i]}""" )
    return features
lowerCAmelCase__ = {
'''hans''': 3,
}
lowerCAmelCase__ = {
'''hans''': HansProcessor,
}
| 130 | 0 |
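# The tf.data pattern used by the TF dataset above, reduced to a toy example:
# the generator yields (features, label) pairs whose dtypes and shapes match
# the declared signature.
import tensorflow as tf

def gen():
    for i in range(3):
        yield {"input_ids": [[i, i + 1]]}, i

ds = tf.data.Dataset.from_generator(
    gen,
    ({"input_ids": tf.int32}, tf.int64),
    ({"input_ids": tf.TensorShape([None, None])}, tf.TensorShape([])),
)
for features, label in ds:
    print(features["input_ids"].shape, int(label))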
from collections.abc import Sequence
def max_subsequence_sum( nums = None ):
    if nums is None or not nums:
        raise ValueError('Input sequence should not be empty' )
    ans = nums[0]
    for i in range(1, len(nums ) ):
        num = nums[i]
        ans = max(ans , ans + num , num )
    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowerCAmelCase__ : Optional[Any] =int(input('Enter number of elements : ').strip())
lowerCAmelCase__ : int =list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
print(max_subsequence_sum(array))
| 162 |
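# Usage examples for the Kadane-style implementation above (assuming
# max_subsequence_sum is in scope):
print(max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # 6, from [4, -1, 2, 1]
print(max_subsequence_sum([5, -9, 6, -2, 3]))                # 7, from [6, -2, 3]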
# Lint as: python3
import itertools
import os
import re
lowerCAmelCase__ : Optional[int] =re.compile(R'([A-Z]+)([A-Z][a-z])')
lowerCAmelCase__ : List[Any] =re.compile(R'([a-z\d])([A-Z])')
lowerCAmelCase__ : Dict =re.compile(R'(?<!_)_(?!_)')
lowerCAmelCase__ : int =re.compile(R'(_{2,})')
lowerCAmelCase__ : Optional[Any] =R'^\w+(\.\w+)*$'
lowerCAmelCase__ : List[Any] =R'<>:/\|?*'
def camelcase_to_snakecase( name ):
    name = _uppercase_uppercase_re.sub(r'\1_\2', name )
    name = _lowercase_uppercase_re.sub(r'\1_\2', name )
    return name.lower()
def snakecase_to_camelcase( name ):
    name = _single_underscore_re.split(name )
    name = [_multiple_underscores_re.split(n ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name ) if n != '' )
def filename_prefix_for_name( name ):
    if os.path.basename(name ) != name:
        raise ValueError(F'''Should be a dataset name, not a path: {name}''' )
    return camelcase_to_snakecase(name )
def filename_prefix_for_split( name, split ):
    if os.path.basename(name ) != name:
        raise ValueError(F'''Should be a dataset name, not a path: {name}''' )
    if not re.match(_split_re, split ):
        raise ValueError(F'''Split name should match \'{_split_re}\'\' but got \'{split}\'.''' )
    return F'''{filename_prefix_for_name(name )}-{split}'''
def filepattern_for_dataset_split( dataset_name, split, data_dir, filetype_suffix=None ):
    prefix = filename_prefix_for_split(dataset_name, split )
    if filetype_suffix:
        prefix += F'''.{filetype_suffix}'''
    filepath = os.path.join(data_dir, prefix )
    return F'''{filepath}*'''
def filenames_for_dataset_split( path, dataset_name, split, filetype_suffix=None, shard_lengths=None ):
    prefix = filename_prefix_for_split(dataset_name, split )
    prefix = os.path.join(path, prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [F'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + F'''.{filetype_suffix}''' for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F'''.{filetype_suffix}'''
        return [filename]
| 162 | 1 |
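# Quick checks for the naming helpers above:
print(camelcase_to_snakecase("SomeDatasetName"))    # some_dataset_name
print(snakecase_to_camelcase("some_dataset_name"))  # SomeDatasetName
print(filename_prefix_for_split("squad", "train"))  # squad-train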
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    """simple docstring"""
    parser = argparse.ArgumentParser()
parser.add_argument("""--model_ckpt""" ,type=lowercase ,default="""microsoft/unixcoder-base-nine""" )
parser.add_argument("""--num_epochs""" ,type=lowercase ,default=5 )
parser.add_argument("""--batch_size""" ,type=lowercase ,default=6 )
parser.add_argument("""--gradient_accumulation_steps""" ,type=lowercase ,default=1 )
parser.add_argument("""--freeze""" ,type=lowercase ,default=lowercase )
parser.add_argument("""--learning_rate""" ,type=lowercase ,default=5E-4 )
parser.add_argument("""--seed""" ,type=lowercase ,default=0 )
parser.add_argument("""--lr_scheduler_type""" ,type=lowercase ,default="""cosine""" )
parser.add_argument("""--num_warmup_steps""" ,type=lowercase ,default=10 )
parser.add_argument("""--weight_decay""" ,type=lowercase ,default=0.01 )
parser.add_argument("""--output_dir""" ,type=lowercase ,default="""./results""" )
return parser.parse_args()
metric = load("""accuracy""")
def compute_metrics( eval_pred ):
    """simple docstring"""
    logits , labels = eval_pred
    predictions = np.argmax(logits , axis=1 )
    return metric.compute(predictions=predictions , references=labels )
class CustomCallback( TrainerCallback ):
    def __init__( self , trainer ):
        super().__init__()
        self._trainer = trainer
    def on_epoch_end( self , args , state , control , **kwargs ):
        if control.should_evaluate:
            control_copy = deepcopy(control )
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="""train""" )
            return control_copy
def main():
    """simple docstring"""
    args = get_args()
    set_seed(args.seed )
    dataset = load_dataset("""codeparrot/codecomplex""" , split="""train""" )
    train_test = dataset.train_test_split(test_size=0.2 )
    test_validation = train_test["""test"""].train_test_split(test_size=0.5 )
    train_test_validation = DatasetDict(
        {
            """train""": train_test["""train"""],
            """test""": test_validation["""train"""],
            """valid""": test_validation["""test"""],
        } )
    print("""Loading tokenizer and model""" )
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7 , names=list(set(train_test_validation["""train"""]["""complexity"""] ) ) )
    def tokenize(example ):
        inputs = tokenizer(example["""src"""] , truncation=True , max_length=10_24 )
        label = labels.str2int(example["""complexity"""] )
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }
    tokenized_datasets = train_test_validation.map(
        tokenize , batched=True , remove_columns=train_test_validation["""train"""].column_names ,)
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer )
    training_args = TrainingArguments(
        output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="""epoch""" , save_strategy="""epoch""" , logging_strategy="""epoch""" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="""accuracy""" , run_name="""complexity-java""" , report_to="""wandb""" ,)
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets["""train"""] , eval_dataset=tokenized_datasets["""valid"""] , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics ,)
    print("""Training...""" )
    trainer.add_callback(CustomCallback(trainer ) )
    trainer.train()
if __name__ == "__main__":
main()
| 289 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_flip_channel_order=True , ):
        size = size if size is not None else {"""shortest_edge""": 20}
        crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = MobileViTImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
        self.assertTrue(hasattr(image_processing , """do_center_crop""" ) )
        self.assertTrue(hasattr(image_processing , """center_crop""" ) )
        self.assertTrue(hasattr(image_processing , """do_flip_channel_order""" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
        self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
        self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def lowerCAmelCase_ ( self : List[str] ):
pass
    def test_call_pil( self : Dict ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_numpy( self : str ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
    def test_call_pytorch( self : Optional[int] ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 289 | 1 |
__version__ = '2.13.1'
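# Runtime guards: importing `datasets` fails fast on unsupported Python or pyarrow versions.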
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 360 |
from __future__ import annotations
from math import pi, sqrt
def _a ( inductance: float , capacitance: float ) -> tuple:
'''simple docstring'''
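    # Resonant frequency of an ideal LC circuit: f0 = 1 / (2 * pi * sqrt(L * C))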
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 250 | 0 |
import math
def __lowercase ( _A ) -> bool:
    # Float check: exact only when sqrt(_A) is exactly representable; the binary search below avoids rounding.
    return math.sqrt(_A ) * math.sqrt(_A ) == _A
def __lowercase ( _A ) -> bool:
    # Binary-search the integer square root; an exact hit means _A is a perfect square.
    left = 0
    right = _A
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == _A:
            return True
        elif mid**2 > _A:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 245 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
UpperCAmelCase__ : Tuple = TypeVar("""T""")
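# Generic graph stored as an adjacency list (vertex -> list of neighbours); supports directed and undirected edges.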
class a__ ( Generic[T] ):
"""simple docstring"""
    def __init__( self : str , directed : bool = True ) ->None:
        """simple docstring"""
        self.adj_list : dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def _lowercase ( self : int , source_vertex : T , destination_vertex : T ) ->GraphAdjacencyList[T]:
"""simple docstring"""
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
def __repr__( self : Dict ) ->str:
"""simple docstring"""
return pformat(self.adj_list )
| 245 | 1 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
    '''simple docstring'''
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        # Translate deprecated `--no_*` flags into an actionable error message.
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
            raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main() | 360 |
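# Russian peasant multiplication: decompose b into bits and accumulate shifted copies of a.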
def _SCREAMING_SNAKE_CASE ( a : int , b : int) -> int:
    '''simple docstring'''
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def _SCREAMING_SNAKE_CASE ( a : int , b : int , c : int) -> int:
    '''simple docstring'''
    # Same add-and-shift loop, but every accumulation is reduced modulo c.
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res | 151 | 0 |
'''simple docstring'''
def lowerCAmelCase_ ( number_of_steps ):
    '''simple docstring'''
    assert (
        isinstance(number_of_steps , int ) and number_of_steps > 0
    ), F'number_of_steps needs to be a positive integer, your input {number_of_steps}'
    if number_of_steps == 1:
        return 1
    # Fibonacci-style recurrence: ways(n) = ways(n - 1) + ways(n - 2)
    previous , current = 1, 1
    for _ in range(number_of_steps - 1 ):
        current , previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 322 | 0 |
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode :
    """simple docstring"""
    data : float
    left : TreeNode | None = None
    right : TreeNode | None = None
def __magic_name__( lowerCamelCase):
# Validation
    def is_valid_tree(node) -> bool:
if node is None:
return True
        if not isinstance(node, TreeNode):
return False
try:
float(node.data)
except (TypeError, ValueError):
return False
return is_valid_tree(node.left) and is_valid_tree(node.right)
if not is_valid_tree(lowerCamelCase):
raise ValueError(
'''Each node should be type of TreeNode and data should be float.''')
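    # Recursive invariant: every node's value must lie strictly inside the (left_bound, right_bound) window inherited from its ancestors.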
    def is_binary_search_tree_recursive_check(
        node, left_bound, right_bound) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
)
return is_binary_search_tree_recursive_check(lowerCamelCase, -float('''inf'''), float('''inf'''))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class a__ ( PretrainedConfig ):
"""simple docstring"""
    model_type = 'roberta'
    def __init__(self , vocab_size=5_02_65 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class a__ ( OnnxConfig ):
"""simple docstring"""
@property
def _snake_case (self ):
if self.task == "multiple-choice":
__lowerCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__lowerCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 9 | 0 |
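# Precomputed sum of squared digits for every number below 100000, so a chain can advance five digits per lookup.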
DIGITS_SQUARED = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]
def __lowerCAmelCase ( a__ ) -> int:
__a = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS : list[bool | None] = [None] * 1_0_0_0_0_0_0_0
CHAINS[0] = True
CHAINS[57] = False
def __lowerCAmelCase ( a__ ) -> bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
__a = chain(next_number(a__ ) )
__a = number_chain
while number < 1000_0000:
__a = number_chain
number *= 10
return number_chain
def __lowerCAmelCase ( a__ = 1000_0000 ) -> int:
for i in range(1 , a__ ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(a__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }") | 6 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
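# Feature extractor in the EnCodec style: validates raw audio, then pads or truncates it into chunk-aligned batches.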
class _a ( SequenceFeatureExtractor ):
'''simple docstring'''
    model_input_names = ['''input_values''', '''padding_mask''']
    def __init__( self, feature_size = 1, sampling_rate = 24_000, padding_value = 0.0, chunk_length_s = None, overlap = None, **kwargs, ):
        '''simple docstring'''
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs )
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
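            # Hop between chunk starts: `overlap` is the fraction each chunk shares with the previous one.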
return max(1, int((1.0 - self.overlap) * self.chunk_length ) )
    def __call__( self, raw_audio, padding = None, truncation = False, max_length = None, return_tensors = None, sampling_rate = None, ):
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    F" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    F" {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        if padding and truncation:
            raise ValueError('Both padding and truncation were set. Make sure you only set one.' )
        elif padding is None:
            # by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio, (list, tuple) ) and (isinstance(raw_audio[0], (np.ndarray, tuple, list) )) )
        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32 ).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray ):
            raw_audio = np.asarray(raw_audio, dtype=np.float32 )
        elif isinstance(raw_audio, np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
            raw_audio = raw_audio.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio ).T]
        # verify inputs are valid
        for idx, example in enumerate(raw_audio ):
            if example.ndim > 2:
                raise ValueError(F"Expected input shape (channels, length) but got shape {example.shape}" )
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(F"Expected mono audio but example has {example.shape[-1]} channels" )
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(F"Expected stereo audio but example has {example.shape[-1]} channels" )
        padded_inputs = None
        input_values = BatchFeature({'input_values': raw_audio} )
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio )
                nb_step = int(np.floor(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio )
                nb_step = int(np.ceil(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = 'max_length'
            else:
                padded_inputs = input_values
        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values, max_length=max_length, truncation=truncation, padding=padding, return_attention_mask=padding, )
            if padding:
                padded_inputs['padding_mask'] = padded_inputs.pop('attention_mask' )
        input_values = []
        for example in padded_inputs.pop('input_values' ):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T )
        padded_inputs['input_values'] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
| 251 | 0 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__lowerCAmelCase : Dict =logging.getLogger(__name__)
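# Head-pruning ("bertology") analysis for GPT-2: score attention heads by entropy and gradient-based importance, then mask and prune the least useful ones.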
def save_model ( model , dirpath ):
    # save results
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , "config.json" ) ) and os.path.isfile(
            os.path.join(dirpath , "config.json" ) ):
            os.remove(os.path.join(dirpath , "config.json" ) )
        if os.path.exists(os.path.join(dirpath , "pytorch_model.bin" ) ) and os.path.isfile(
            os.path.join(dirpath , "pytorch_model.bin" ) ):
            os.remove(os.path.join(dirpath , "pytorch_model.bin" ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def entropy ( p , unlogit=False ):
    # Shannon entropy along the last axis; with `unlogit` the scores are squared before the computation.
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1 )
def print_ad_tensor ( tensor ):
    # Log a 2D tensor row by row, formatting floats and integers differently.
    logger.info("lv, h >\t" + "\t".join(F"{x + 1}" for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
        if tensor.dtype != torch.long:
            logger.info(F"layer {row + 1}:\t" + "\t".join(F"{x:.5f}" for x in tensor[row].cpu().data ) )
        else:
            logger.info(F"layer {row + 1}:\t" + "\t".join(F"{x:d}" for x in tensor[row].cpu().data ) )
def compute_heads_importance ( args , model , eval_dataloader , compute_entropy=True , compute_importance=True , head_mask=None , actually_pruned=False ):
A__, A__ = model.config.num_hidden_layers, model.config.num_attention_heads
A__ = torch.zeros(_lowerCamelCase , _lowerCamelCase ).to(args.device )
A__ = torch.zeros(_lowerCamelCase , _lowerCamelCase ).to(args.device )
if head_mask is None:
A__ = torch.ones(_lowerCamelCase , _lowerCamelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=_lowerCamelCase )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
A__ = None
A__ = 0.0
A__ = 0.0
for step, inputs in enumerate(tqdm(_lowerCamelCase , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
A__ = tuple(t.to(args.device ) for t in inputs )
((A__ ), ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
A__ = model(_lowerCamelCase , labels=_lowerCamelCase , head_mask=_lowerCamelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
A__, A__, A__ = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(_lowerCamelCase ):
A__ = entropy(attn.detach() , _lowerCamelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(_lowerCamelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
A__ = 2
A__ = torch.pow(torch.pow(_lowerCamelCase , _lowerCamelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
A__ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(_lowerCamelCase )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(_lowerCamelCase )
logger.info("Head ranked by importance scores" )
A__ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
A__ = torch.arange(
head_importance.numel() , device=args.device )
A__ = head_ranks.view_as(_lowerCamelCase )
print_ad_tensor(_lowerCamelCase )
return attn_entropy, head_importance, total_loss
def mask_heads ( args , model , eval_dataloader ):
A__, A__, A__ = compute_heads_importance(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , compute_entropy=_lowerCamelCase )
A__ = 1 / loss # instead of downsteam score use the LM loss
logger.info("Pruning: original score: %f, threshold: %f" , _lowerCamelCase , original_score * args.masking_threshold )
A__ = torch.ones_like(_lowerCamelCase )
A__ = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
A__ = original_score
while current_score >= original_score * args.masking_threshold:
A__ = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
A__ = float("Inf" )
A__ = head_importance.view(-1 ).sort()[1]
if len(_lowerCamelCase ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
A__ = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
A__ = new_head_mask.view(-1 )
A__ = 0.0
A__ = new_head_mask.view_as(_lowerCamelCase )
A__ = new_head_mask.clone().detach()
print_ad_tensor(_lowerCamelCase )
# Compute metric and head importance again
A__, A__, A__ = compute_heads_importance(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , compute_entropy=_lowerCamelCase , head_mask=_lowerCamelCase )
A__ = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , _lowerCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info("Final head mask" )
print_ad_tensor(_lowerCamelCase )
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
def prune_heads ( args , model , eval_dataloader , head_mask ):
A__ = datetime.now()
A__, A__, A__ = compute_heads_importance(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , compute_entropy=_lowerCamelCase , compute_importance=_lowerCamelCase , head_mask=_lowerCamelCase )
A__ = 1 / loss
A__ = datetime.now() - before_time
A__ = sum(p.numel() for p in model.parameters() )
A__ = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_lowerCamelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
A__ = [
v,
]
assert sum(len(_lowerCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(_lowerCamelCase )
A__ = sum(p.numel() for p in model.parameters() )
A__ = datetime.now()
A__, A__, A__ = compute_heads_importance(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , compute_entropy=_lowerCamelCase , compute_importance=_lowerCamelCase , head_mask=_lowerCamelCase , actually_pruned=_lowerCamelCase , )
A__ = 1 / loss
A__ = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , _lowerCamelCase , _lowerCamelCase , pruned_num_params / original_num_params * 1_00 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , _lowerCamelCase , _lowerCamelCase )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 1_00 )
save_model(_lowerCamelCase , args.output_dir )
def main ( ):
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=_lowerCamelCase , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=_lowerCamelCase , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=_lowerCamelCase , type=_lowerCamelCase , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=_lowerCamelCase , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=_lowerCamelCase , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=_lowerCamelCase , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=_lowerCamelCase , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=1_28 , type=_lowerCamelCase , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=_lowerCamelCase , help="Batch size." )
parser.add_argument("--seed" , type=_lowerCamelCase , default=42 )
parser.add_argument("--local_rank" , type=_lowerCamelCase , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=_lowerCamelCase , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=_lowerCamelCase , default="" , help="Can be used for distant debugging." )
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_lowerCamelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
A__ = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
A__ = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
A__ = torch.device("cuda" , args.local_rank )
A__ = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
A__ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
A__ = nn.parallel.DistributedDataParallel(
_lowerCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_lowerCamelCase )
elif args.n_gpu > 1:
A__ = nn.DataParallel(_lowerCamelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=_lowerCamelCase )
torch.save(_lowerCamelCase , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , _lowerCamelCase )
# Prepare dataset
A__ = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
A__ = (torch.from_numpy(_lowerCamelCase ),)
A__ = TensorDataset(*_lowerCamelCase )
A__ = RandomSampler(_lowerCamelCase )
A__ = DataLoader(_lowerCamelCase , sampler=_lowerCamelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
A__ = mask_heads(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
prune_heads(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
main()
| 368 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
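# The PNDM scheduler runs Runge-Kutta (PRK) warm-up steps followed by linear multistep (PLMS) steps; the tests below exercise both phases.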
class UpperCAmelCase ( SchedulerCommonTest ):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("""num_inference_steps""", 50),)
    def get_scheduler_config( self :List[str] , **lowercase_ :Optional[int] )-> List[str]:
        config = {
"num_train_timesteps": 10_00,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
}
config.update(**lowercase_ )
return config
    def check_over_configs( self :int , lowercase_ :Optional[int]=0 , **kwargs :Any )-> int:
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop("num_inference_steps" , lowercase_ )
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config(**lowercase_ )
A__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
A__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
A__ = scheduler_class.from_pretrained(lowercase_ )
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals
A__ = dummy_past_residuals[:]
A__ = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
A__ = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
A__ = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
A__ = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self :List[Any] )-> Tuple:
pass
    def check_over_forward( self :Optional[Any] , lowercase_ :Optional[Any]=0 , **kwargs :List[str] )-> str:
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop("num_inference_steps" , lowercase_ )
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config()
A__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# copy over dummy past residuals (must be after setting timesteps)
A__ = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase_ )
A__ = scheduler_class.from_pretrained(lowercase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase_ )
# copy over dummy past residual (must be after setting timesteps)
A__ = dummy_past_residuals[:]
A__ = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
A__ = new_scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
A__ = scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
A__ = new_scheduler.step_plms(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
    def full_loop( self :Optional[int] , **lowercase_ :Dict )-> Optional[int]:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**lowercase_ )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.prk_timesteps ):
            residual = model(sample , t )
            sample = scheduler.step_prk(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps ):
            residual = model(sample , t )
            sample = scheduler.step_plms(residual , t , sample ).prev_sample
return sample
def UpperCAmelCase_ ( self :int )-> Union[str, Any]:
A__ = dict(self.forward_default_kwargs )
A__ = kwargs.pop("num_inference_steps" , lowercase_ )
for scheduler_class in self.scheduler_classes:
A__ = self.get_scheduler_config()
A__ = scheduler_class(**lowercase_ )
A__ = self.dummy_sample
A__ = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase_ , "set_timesteps" ):
scheduler.set_timesteps(lowercase_ )
elif num_inference_steps is not None and not hasattr(lowercase_ , "set_timesteps" ):
A__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
A__ = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
A__ = dummy_past_residuals[:]
A__ = scheduler.step_prk(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
A__ = scheduler.step_prk(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
A__ = scheduler.step_plms(lowercase_ , 0 , lowercase_ , **lowercase_ ).prev_sample
A__ = scheduler.step_plms(lowercase_ , 1 , lowercase_ , **lowercase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase_ ( self :Any )-> Dict:
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def UpperCAmelCase_ ( self :List[str] )-> int:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config(steps_offset=1 )
A__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
def UpperCAmelCase_ ( self :Optional[Any] )-> Any:
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] , [0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def UpperCAmelCase_ ( self :Tuple )-> Tuple:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def UpperCAmelCase_ ( self :Optional[Any] )-> Tuple:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def UpperCAmelCase_ ( self :Optional[int] )-> Tuple:
for t in [1, 5, 10]:
self.check_over_forward(time_step=lowercase_ )
def UpperCAmelCase_ ( self :List[Any] )-> Optional[int]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=lowercase_ )
def UpperCAmelCase_ ( self :List[str] )-> str:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
A__ = 27
for scheduler_class in self.scheduler_classes:
A__ = self.dummy_sample
A__ = 0.1 * sample
A__ = self.get_scheduler_config()
A__ = scheduler_class(**lowercase_ )
scheduler.set_timesteps(lowercase_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
A__ = scheduler.step_prk(lowercase_ , lowercase_ , lowercase_ ).prev_sample
def UpperCAmelCase_ ( self :List[str] )-> str:
with self.assertRaises(lowercase_ ):
A__ = self.scheduler_classes[0]
A__ = self.get_scheduler_config()
A__ = scheduler_class(**lowercase_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def UpperCAmelCase_ ( self :int )-> str:
A__ = self.full_loop()
A__ = torch.sum(torch.abs(lowercase_ ) )
A__ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1E-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1E-3
def UpperCAmelCase_ ( self :Optional[Any] )-> List[Any]:
A__ = self.full_loop(prediction_type="v_prediction" )
A__ = torch.sum(torch.abs(lowercase_ ) )
A__ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1E-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1E-3
def UpperCAmelCase_ ( self :Tuple )-> Tuple:
# We specify different beta, so that the first alpha is 0.99
A__ = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.0_1 )
A__ = torch.sum(torch.abs(lowercase_ ) )
A__ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1E-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1E-3
def UpperCAmelCase_ ( self :Dict )-> Any:
# We specify different beta, so that the first alpha is 0.99
A__ = self.full_loop(set_alpha_to_one=lowercase_ , beta_start=0.0_1 )
A__ = torch.sum(torch.abs(lowercase_ ) )
A__ = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1E-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1E-3
| 123 | 0 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
_SCREAMING_SNAKE_CASE = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
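# Scripts listed above are excluded from the feature-vs-complete example comparison performed below.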
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    def one_complete_example( self : Optional[Any] , complete_file_name : str , parser_only : bool , secondary_filename : str = None , special_strings : list = None ):
        """simple docstring"""
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
        examples_path = os.path.abspath("""examples""" )
        for item in os.listdir(by_feature_path ):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path , item )
                if os.path.isfile(item_path ) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name , feature_script=item , tested_section="""main()""" if parser_only else """training_function()""" , ):
                        diff = compare_against_test(
                            os.path.join(examples_path , complete_file_name ) , item_path , secondary_filename , special_strings )
                        diff = '''\n'''.join(diff )
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string , """""" )
                        self.assertEqual(diff , """""" )
    def test_nlp_examples( self : Tuple ):
        """simple docstring"""
        self.one_complete_example("""complete_nlp_example.py""" , True )
        self.one_complete_example("""complete_nlp_example.py""" , False )
    def test_cv_examples( self : str ):
        """simple docstring"""
        cv_path = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
        special_strings = [
''' ''' * 16 + '''{\n\n''',
''' ''' * 20 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 20 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 20 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 20 + '''"epoch": epoch,\n\n''',
''' ''' * 16 + '''},\n\n''',
''' ''' * 16 + '''step=epoch,\n''',
''' ''' * 12,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
        self.one_complete_example("""complete_cv_example.py""" , True , cv_path , special_strings )
        self.one_complete_example("""complete_cv_example.py""" , False , cv_path , special_strings )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class SCREAMING_SNAKE_CASE_ ( TempDirTestCase ):
__lowerCAmelCase = False
@classmethod
def lowerCamelCase_ ( cls : Any ):
"""simple docstring"""
super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir , """default_config.yml""" )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def lowerCamelCase_ ( cls : Any ):
"""simple docstring"""
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
        testargs = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
        testargs = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
        output = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
        testargs = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
""".split()
        output = run_command(self._launch_args + testargs , return_stdout=True )
        self.assertNotIn("""epoch 0:""" , output )
        self.assertIn("""epoch 1:""" , output )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
        testargs = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
""".split()
        output = run_command(self._launch_args + testargs , return_stdout=True )
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("""epoch 0:""" , output )
            self.assertIn("""epoch 1:""" , output )
        else:
            self.assertIn("""epoch 0:""" , output )
            self.assertIn("""epoch 1:""" , output )
@slow
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
        testargs = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
            output = run_command(self._launch_args + testargs , return_stdout=True )
            results = re.findall("""({.+})""" , output )
            results = [r for r in results if '''accuracy''' in r][-1]
            results = ast.literal_eval(results )
self.assertGreaterEqual(results["""accuracy"""] , 0.7_5 )
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
        testargs = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(tmpdir , """tracking""" ) ) )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
        testargs = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
        testargs = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 343 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class lowerCAmelCase_ ( PretrainedConfig ):
'''simple docstring'''
    model_type = """convbert"""
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , embedding_size=7_68 , head_ratio=2 , conv_kernel_size=9 , num_groups=1 , classifier_dropout=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class lowerCAmelCase_ ( OnnxConfig ):
'''simple docstring'''
@property
def _snake_case ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
A: Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
A: List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 319 | 0 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __SCREAMING_SNAKE_CASE :
pass
| 365 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
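# The tester below builds a tiny ResNet config plus random inputs shared by the TF model tests.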
class TFResNetModelTester :
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
"""simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = TFResNetModel(config=config )
        result = model(pixel_values )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config )
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also works using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO fixture image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
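# The hidden-state assertions above rely on ResNet's fixed downsampling schedule:
# the stem reduces resolution by 4x and each later stage by a further 2x, so the
# final feature map is image_size // 32. An illustrative check in plain Python:
def resnet_feature_sizes(image_size: int, num_stages: int = 4) -> list:
    sizes = [image_size // 4]  # stem output
    for _ in range(num_stages - 1):
        sizes.append(sizes[-1] // 2)  # each later stage halves the resolution
    return sizes
assert resnet_feature_sizes(32) == [8, 4, 2, 1]  # 32 // 32 == 1 at the last stage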
| 122 | 0 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    order = 1
    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample
    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample
    def __len__(self) -> int:
        return self.config.num_train_timesteps
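# The `ets` buffer above is a linear multistep (Adams-Bashforth) predictor. An
# illustrative sketch of the same coefficient schedule on a plain history of
# derivative estimates (newest entry last); not part of the diffusers API.
def adams_bashforth_combination(ets: list) -> float:
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24
# A constant history must be preserved exactly at every order:
assert adams_bashforth_combination([1.0, 1.0, 1.0, 1.0]) == 1.0  # (55-59+37-9)/24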
| 240 |
'''simple docstring'''
def binomial_coefficient(n: int, k: int) -> int:
    """Return C(n, k), the number of ways to choose k items out of n."""
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n - k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
def catalan_number(node_count: int) -> int:
    """Return the node_count-th Catalan number, C(2n, n) // (n + 1)."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)
def factorial(n: int) -> int:
    """Return n! for non-negative n."""
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result
def binary_tree_count(node_count: int) -> int:
    """Return the number of binary trees with node_count distinct nodes."""
    return catalan_number(node_count) * factorial(node_count)
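# Illustrative sanity checks for the helpers above (standard Catalan numbers):
def _catalan_demo() -> None:
    assert catalan_number(3) == 5  # C(6, 3) // 4 == 20 // 4
    assert catalan_number(4) == 14
    assert binary_tree_count(3) == 30  # 5 tree shapes x 3! labelings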
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict = int(input("""Enter the number of nodes: """).strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
F'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
F'binary trees and {catalan_number(node_count)} binary search trees.'
)
| 31 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = MBartTokenizer(A, keep_accents=A )
SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('This is a test' )
self.assertListEqual(A, ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
A, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
], )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
], )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
], )
def UpperCamelCase_ ( self ):
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
SCREAMING_SNAKE_CASE : int = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : str = self.tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Any = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.save_pretrained(A )
SCREAMING_SNAKE_CASE : Dict = tokenizer_p.save_pretrained(A )
                # Checks it saves with the same files plus the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE : int = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(A, A )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : List[str] = tokenizer_r.from_pretrained(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A, A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE : List[str] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : str = tokenizer_r.save_pretrained(A, legacy_format=A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_p.save_pretrained(A )
                # Checks it saves with the same files
self.assertSequenceEqual(A, A )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : str = tokenizer_r.from_pretrained(A )
SCREAMING_SNAKE_CASE : str = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A, A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.save_pretrained(A, legacy_format=A )
SCREAMING_SNAKE_CASE : Any = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.from_pretrained(A )
SCREAMING_SNAKE_CASE : Any = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A, A ) )
shutil.rmtree(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'], 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'], 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'], 250_020 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.assertIn(A, self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE : Optional[Any] = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
SCREAMING_SNAKE_CASE : Dict = self.tokenizer.decode(A, skip_special_tokens=A )
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=A )
self.assertEqual(A, A )
self.assertNotIn(self.tokenizer.eos_token, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0], A )
SCREAMING_SNAKE_CASE : List[Any] = 10
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(A, max_length=A, truncation=A ).input_ids[0]
self.assertEqual(ids[-2], 2 )
self.assertEqual(ids[-1], A )
self.assertEqual(len(A ), A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ), [250_026, 250_001] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A )
SCREAMING_SNAKE_CASE : Optional[int] = MBartTokenizer.from_pretrained(A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, A )
@require_torch
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=A, return_tensors='pt' )
SCREAMING_SNAKE_CASE : Union[str, Any] = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(
self.src_text, text_target=self.tgt_text, padding=A, truncation=A, max_length=len(self.expected_src_tokens ), return_tensors='pt', )
SCREAMING_SNAKE_CASE : Any = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id )
self.assertIsInstance(A, A )
self.assertEqual((2, 14), batch.input_ids.shape )
self.assertEqual((2, 14), batch.attention_mask.shape )
SCREAMING_SNAKE_CASE : Any = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, A )
self.assertEqual(2, batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [] )
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE] )
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(self.src_text, padding=A, truncation=A, max_length=3, return_tensors='pt' )
SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(
text_target=self.tgt_text, padding=A, truncation=A, max_length=10, return_tensors='pt' )
SCREAMING_SNAKE_CASE : Optional[Any] = targets['input_ids']
SCREAMING_SNAKE_CASE : Tuple = shift_tokens_right(A, self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.decoder_input_ids.shape[1], 10 )
@require_torch
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.tokenizer._build_translation_inputs(
'A test', return_tensors='pt', src_lang='en_XX', tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(A ), {
# A, test, EOS, en_XX
'input_ids': [[62, 3_034, 2, 250_004]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250_001,
}, )
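# A minimal sketch of the translation round-trip these integration tests
# exercise; it assumes network access to the `facebook/mbart-large-en-ro`
# checkpoint and is illustrative, not part of the test file itself.
def translate_en_to_ro(text: str) -> str:
    from transformers import MBartForConditionalGeneration
    tokenizer = MBartTokenizer.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
    batch = tokenizer(text, return_tensors="pt")
    generated = model.generate(**batch, forced_bos_token_id=tokenizer.lang_code_to_id["ro_RO"])
    return tokenizer.batch_decode(generated, skip_special_tokens=True)[0]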
| 246 |
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    """Return the length of the smallest repunit (111...1) divisible by divisor."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution(limit: int = 1_00_00_00) -> int:
    """Return the least odd divisor whose smallest divisible repunit exceeds limit digits."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
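# Illustrative worked checks for least_divisible_repunit (not in the original):
def _repunit_demo() -> None:
    assert least_divisible_repunit(7) == 6  # 111111 == 7 * 15873
    assert least_divisible_repunit(41) == 5  # 11111 == 41 * 271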
if __name__ == "__main__":
print(F"""{solution() = }""")
| 246 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []
    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"
    @property
    def level(self) -> int:
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__(self) -> str:
        items = list(self)
        if len(items) == 0:
            return f"SkipList(level={self.level})"
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward
        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)
    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]
    def random_level(self) -> int:
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector
    def delete(self, key: KT) -> None:
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]
    def insert(self, key: KT, value: VT) -> None:
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node
    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)
    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)
    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None
def test_search():
    skip_list = SkipList()
    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20
    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)
    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13
def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")
    assert len(skip_list.head.forward) == 0
def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)
    skip_list.delete("V")
    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None
def test_delete_removes_only_given_key():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)
    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15
    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15
    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15
    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None
def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)
    skip_list.delete("X")
    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)
    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))
    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()
def main():
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")
    skip_list.delete(4)
    print(skip_list)
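# A short usage sketch of the structure above; expected search cost is
# O(log n) on average thanks to the geometric level distribution (p = 0.5).
def demo_skip_list() -> None:
    sl = SkipList()
    sl.insert("alpha", 1)
    sl.insert("beta", 2)
    sl.insert("gamma", 3)
    assert sl.find("beta") == 2
    sl.delete("beta")
    assert sl.find("beta") is None
    assert list(sl) == ["alpha", "gamma"]  # iteration yields keys in sorted order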
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 152 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
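# Quick check of the derived hidden_size: the channel dimension doubles at each
# of the later stages, so with the defaults it is 96 * 2**3. Illustrative only.
def _swinv2_hidden_size_demo() -> None:
    config = Swinv2Config()
    assert config.hidden_size == 96 * 2 ** (len(config.depths) - 1) == 768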
| 332 | 0 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 1000,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
IMAGENET_64_UNET_CONFIG = {
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 1000,
'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
LSUN_256_UNET_CONFIG = {
'sample_size': 256,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
CD_SCHEDULER_CONFIG = {
'num_train_timesteps': 40,
'sigma_min': 0.0_0_2,
'sigma_max': 8_0.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'num_train_timesteps': 201,
'sigma_min': 0.0_0_2,
'sigma_max': 8_0.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'num_train_timesteps': 151,
'sigma_min': 0.0_0_2,
'sigma_max': 8_0.0,
}
def strabool(v) -> bool:
    """Parse common truthy/falsy strings into a bool for argparse."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(unet_path: str, unet_config: dict) -> dict:
    """Convert a consistency-models U-Net checkpoint into the diffusers layout."""
    checkpoint = torch.load(unet_path, map_location="cpu")
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]
    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels
    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    current_layer = 0
    up_block_types = unet_config["up_block_types"]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
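# A hedged usage sketch for the converted checkpoint: load the saved pipeline
# and draw a single-step sample. The dump path and class label are illustrative.
def sample_from_converted(dump_path: str):
    pipe = ConsistencyModelPipeline.from_pretrained(dump_path)
    pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    # Single-step generation is the headline capability of consistency models.
    return pipe(num_inference_steps=1, class_labels=0).images[0]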
if __name__ == "__main__":
lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
)
parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
lowercase : List[Any] = parser.parse_args()
lowercase : Tuple = strabool(args.class_cond)
lowercase : Any = os.path.basename(args.unet_path)
print(f"Checkpoint: {ckpt_name}")
# Get U-Net config
if "imagenet64" in ckpt_name:
lowercase : Optional[Any] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowercase : List[Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
lowercase : List[Any] = TEST_UNET_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
if not args.class_cond:
lowercase : Union[str, Any] = None
lowercase : Dict = con_pt_to_diffuser(args.unet_path, unet_config)
lowercase : List[Any] = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
lowercase : Dict = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
lowercase : Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
lowercase : List[Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
lowercase : Optional[int] = CMStochasticIterativeScheduler(**scheduler_config)
lowercase : List[str] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path) | 363 |
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph as an adjacency dict; each edge is kept with the given probability."""
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is smaller than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add an edge from j to i
                    graph[j].append(i)
    return graph
def complete_graph(vertices_number: int) -> dict:
    """Generate a complete (fully connected) graph with vertices_number vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
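# For an undirected graph on n nodes, each of the n * (n - 1) / 2 candidate
# edges is kept independently with the given probability, so the expected edge
# count is p * n * (n - 1) / 2. An illustrative, seeded empirical check:
def _expected_edges_demo() -> None:
    random.seed(0)
    n, p = 100, 0.3
    graph = random_graph(n, p)
    edges = sum(len(adjacency) for adjacency in graph.values()) // 2
    assert abs(edges - p * n * (n - 1) / 2) < 150  # mean is 1485; tolerance is loose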
if __name__ == "__main__":
import doctest
doctest.testmod() | 151 | 0 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    """Check whether num/den is a non-trivial digit-cancelling fraction."""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    """Collect all digit-cancelling fractions with digit_len-digit numerators."""
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n: int = 2) -> int:
    """Return the denominator of the product of all digit-cancelling fractions, in lowest terms."""
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
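# The classic instance: 49/98 "cancels" the nines to 4/8, which still equals 1/2.
# Illustrative checks against the predicate above:
def _digit_cancelling_demo() -> None:
    assert is_digit_cancelling(49, 98)  # 49/98 == 4/8
    assert is_digit_cancelling(16, 64)  # 16/64 == 1/4
    assert not is_digit_cancelling(30, 50)  # trailing zeros are trivial, not cancelling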
if __name__ == "__main__":
print(solution())
| 39 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
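# Illustrative checks (not in the original module): 360 == 2**3 * 3**2 * 5.
def _prime_factors_demo() -> None:
    assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
    assert prime_factors(97) == [97]  # a prime is its own only factor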
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39 | 1 |
"""simple docstring"""
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    """Convert an image to its negative by inverting every channel value in place."""
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
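# The transform above is simply 255 - value per channel. A self-contained check
# on a synthetic array, so no image file is required (illustrative only):
def _negative_demo() -> None:
    import numpy as np
    black = np.zeros((2, 2, 3), dtype=np.uint8)
    assert (convert_to_negative(black) == 255).all()  # negative of black is white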
if __name__ == "__main__":
# read original image
lowercase__ :int = imread("image_data/lena.jpg", 1)
# convert to its negative
lowercase__ :List[str] = convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
| 361 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self):
lowercase = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(A__ ,'''image_mean'''))
self.assertTrue(hasattr(A__ ,'''image_std'''))
self.assertTrue(hasattr(A__ ,'''do_normalize'''))
self.assertTrue(hasattr(A__ ,'''do_resize'''))
self.assertTrue(hasattr(A__ ,'''size'''))
def A__ ( self):
lowercase = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size ,{'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3})
self.assertEqual(image_processor.do_pad ,A__)
lowercase = self.image_processing_class.from_dict(
self.image_processor_dict ,size=4_2 ,max_size=8_4 ,pad_and_return_pixel_mask=A__)
self.assertEqual(image_processor.size ,{'''shortest_edge''': 4_2, '''longest_edge''': 8_4})
self.assertEqual(image_processor.do_pad ,A__)
def A__ ( self):
pass
def A__ ( self):
# Initialize image_processing
lowercase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A__)
for image in image_inputs:
self.assertIsInstance(A__ ,Image.Image)
# Test not batched input
lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''').pixel_values
lowercase , lowercase = self.image_processor_tester.get_expected_values(A__)
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
lowercase , lowercase = self.image_processor_tester.get_expected_values(A__ ,batched=A__)
lowercase = image_processing(A__ ,return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def A__ ( self):
# Initialize image_processing
lowercase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A__ ,numpify=A__)
for image in image_inputs:
self.assertIsInstance(A__ ,np.ndarray)
# Test not batched input
lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''').pixel_values
lowercase , lowercase = self.image_processor_tester.get_expected_values(A__)
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
lowercase = image_processing(A__ ,return_tensors='''pt''').pixel_values
lowercase , lowercase = self.image_processor_tester.get_expected_values(A__ ,batched=A__)
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def A__ ( self):
# Initialize image_processing
lowercase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A__ ,torchify=A__)
for image in image_inputs:
        self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
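# Illustrative stand-alone usage of the processor exercised above (a sketch, not
# part of the original test file; the checkpoint id is the one the tests load):
#   processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
#   inputs = processor(images=Image.open("cats.png"), return_tensors="pt")
#   inputs["pixel_values"].shape  # e.g. torch.Size([1, 3, 800, 1066]) after resizing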
| 97 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
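# Minimal usage sketch (illustrative, not part of the original file): round-trip
# the config through the standard PretrainedConfig save/load API.
#   config = LiltConfig(channel_shrink_ratio=4)
#   config.save_pretrained("./lilt-config")          # writes config.json
#   reloaded = LiltConfig.from_pretrained("./lilt-config")
#   assert reloaded.max_2d_position_embeddings == 1024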
| 330 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to save the converted diffusers VAE to.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
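# Example invocation (illustrative; assumes the script is saved as
# convert_vae_pt_to_diffusers.py):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.ckpt --dump_path ./vae-diffusers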
| 122 | 0 |
from sklearn.metrics import mean_squared_error

import datasets


_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'

_DESCRIPTION = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'

_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Estimated target values.\n    references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n        Ground truth (correct) target values.\n    sample_weight: array-like of shape (n_samples,), default=None\n        Sample weights.\n    multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n        Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n        "raw_values" : Returns a full set of errors in case of multioutput input.\n\n        "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n    squared : bool, default=True\n        If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n    mse : mean squared error.\nExamples:\n\n    >>> mse_metric = datasets.load_metric("mse")\n    >>> predictions = [2.5, 0.0, 2, 8]\n    >>> references = [3, -0.5, 2, 7]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'mse\': 0.375}\n    >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n    >>> print(rmse_result)\n    {\'mse\': 0.6123724356957945}\n\n    If you\'re using multi-dimensional lists, then set the config as follows :\n\n    >>> mse_metric = datasets.load_metric("mse", "multilist")\n    >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n    >>> references = [[0, 2], [-1, 2], [8, -5]]\n    >>> results = mse_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'mse\': 0.7083333333333334}\n    >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mse\': array([0.41666667, 1. ])}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )

        return {"mse": mse}
| 350 |
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 236 | 0 |
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node to store the parent and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set data structure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation: attach the lower-rank tree to the higher-rank one
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm: collect every edge once, sort by weight, then greedily
        # keep the lightest edge that does not close a cycle (checked via the disjoint set).
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
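# Minimal usage sketch (added for illustration; not in the original module):
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 10)
    mst = g.kruskal()
    print(mst.connections)  # the (1, 3) edge is dropped: it would close a cycle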
| 200 |
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    # Pipeline for unconditional image generation with the score-based (SDE-VE)
    # formulation: alternating corrector and predictor steps.
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
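# Illustrative usage (a sketch, not from the original file; the checkpoint id is
# an assumed example of this pipeline family):
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]
#   image.save("sde_ve_sample.png")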
| 200 | 1 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 124 |
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    # rotate the matrix by 90 degrees counterclockwise
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    # rotate the matrix by 180 degrees
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    # rotate the matrix by 270 degrees counterclockwise
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
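# Quick sanity check (added for illustration): two successive 90-degree
# rotations equal a single 180-degree rotation.
if __name__ == "__main__":
    sample = make_matrix(2)
    assert rotate_90(rotate_90(sample)) == rotate_180(sample)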
| 124 | 1 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}


class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        # Loads an ONNX inference session with a given provider.
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str]] = None,
        revision: Optional[str] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options=None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
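# Illustrative usage (a sketch, not from the original file; the repo id and file
# name are placeholders):
#   model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-repo", file_name="model.onnx")
#   outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))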
| 205 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
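# Note (illustrative): with the _LazyModule registration above, a statement like
#   from transformers.models.xlnet import XLNetModel
# resolves `XLNetModel` lazily; the heavy `modeling_xlnet` submodule (and torch)
# is only imported on first attribute access, not when the package is imported.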
| 205 | 1 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    # Launches a training function, using several processes if possible in the
    # current environment (e.g. a TPU with multiple cores).
    # Are we in a google colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )

        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )

            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e

        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    # Launches a training function with several CPU processes, for debugging.
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.01",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
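# Illustrative notebook usage (a sketch, not from the original file):
#   def training_loop(mixed_precision="fp16"):
#       ...  # build the Accelerator and the full training loop inside this function
#   notebook_launcher(training_loop, args=("fp16",), num_processes=2)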
| 368 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict

    class TFMobileBertModelTester(object):
        def __init__(
            self,
            parent,
            batch_size=13,
            seq_length=7,
            is_training=True,
            use_input_mask=True,
            use_token_type_ids=True,
            use_labels=True,
            vocab_size=99,
            hidden_size=32,
            embedding_size=32,
            num_hidden_layers=2,
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            max_position_embeddings=512,
            type_vocab_size=16,
            type_sequence_label_size=2,
            initializer_range=0.02,
            num_labels=3,
            num_choices=4,
            scope=None,
        ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size

        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = MobileBertConfig(
                vocab_size=self.vocab_size,
                hidden_size=self.hidden_size,
                num_hidden_layers=self.num_hidden_layers,
                num_attention_heads=self.num_attention_heads,
                intermediate_size=self.intermediate_size,
                hidden_act=self.hidden_act,
                hidden_dropout_prob=self.hidden_dropout_prob,
                attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                max_position_embeddings=self.max_position_embeddings,
                type_vocab_size=self.type_vocab_size,
                initializer_range=self.initializer_range,
                embedding_size=self.embedding_size,
            )

            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def create_and_check_mobilebert_model(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertModel(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)

            inputs = [input_ids, input_mask]
            result = model(inputs)

            result = model(input_ids)

            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
            )
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def create_and_check_mobilebert_for_masked_lm(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def create_and_check_mobilebert_for_next_sequence_prediction(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_pretraining(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)
            )
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def create_and_check_mobilebert_for_sequence_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def create_and_check_mobilebert_for_multiple_choice(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                "input_ids": multiple_choice_inputs_ids,
                "attention_mask": multiple_choice_input_mask,
                "token_type_ids": multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def create_and_check_mobilebert_for_token_classification(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def create_and_check_mobilebert_for_question_answering(
            self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        ):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
            return config, inputs_dict

    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 300 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We verify the converted weights on an image of two cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the original DINO weights into our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
__A = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 90 |
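`read_in_q_k_v` above hinges on one detail: timm stores the attention projections as a single fused `qkv` matrix, while the HF checkpoint keeps separate query/key/value tensors. A standalone sketch of that split on toy shapes (hypothetical dimensions, not tied to any checkpoint):

import torch

hidden_size = 4
# Fused projection as stored by timm: (3 * hidden_size, hidden_size).
qkv_weight = torch.randn(3 * hidden_size, hidden_size)

# Rows 0..h-1 are the query, h..2h-1 the key, 2h..3h-1 the value.
q = qkv_weight[:hidden_size, :]
k = qkv_weight[hidden_size : 2 * hidden_size, :]
v = qkv_weight[-hidden_size:, :]

# Concatenating the three slices reconstructs the fused matrix exactly.
assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)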
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validate the initial digits of the credit card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Perform the Luhn checksum on the given credit card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling a digit results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 16: 1 + 6 = 7)
        # to get a single-digit number. For a doubled digit this digit sum
        # equals (digit % 10) + 1, i.e. digit - 9.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print and return whether the credit card number is valid."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
| 110 | 0 |
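To see why the `digit %= 10; digit += 1` shortcut in `luhn_validation` equals summing the digits of the product: a doubled digit is at most 18, so any two-digit product d satisfies (d // 10) + (d % 10) = (d % 10) + 1 = d - 9. A quick sanity check:

# For every digit whose double exceeds 9, the shortcut matches the digit sum.
for d in range(5, 10):
    doubled = d * 2  # 10, 12, 14, 16, 18
    digit_sum = doubled // 10 + doubled % 10
    shortcut = doubled % 10 + 1
    assert digit_sum == shortcut == doubled - 9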
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
SCREAMING_SNAKE_CASE = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class UpperCAmelCase_ :
def __init__( self : Optional[int] , snake_case_ : List[Any] , snake_case_ : Tuple=16 , snake_case_ : Dict=13 , snake_case_ : Optional[Any]=7 , snake_case_ : Optional[Any]=14 , snake_case_ : List[str]=10 , snake_case_ : Optional[Any]=19 , snake_case_ : List[str]=5 , snake_case_ : Optional[Any]=4 , snake_case_ : str=True , snake_case_ : Tuple=16 , snake_case_ : Union[str, Any]=2 , snake_case_ : Tuple=4 , snake_case_ : Optional[int]=4 , snake_case_ : List[Any]="gelu" , snake_case_ : int=0.1 , snake_case_ : Dict=0.1 , snake_case_ : Dict=[1, 2, 3, 4, 5] , snake_case_ : str=25 , snake_case_ : List[str]=5 , ) -> str:
'''simple docstring'''
A__ = d_model
A__ = parent
A__ = batch_size
A__ = prediction_length
A__ = context_length
A__ = cardinality
A__ = num_time_features
A__ = lags_sequence
A__ = embedding_dimension
A__ = is_training
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = context_length
A__ = prediction_length + label_length
A__ = label_length
A__ = moving_average
A__ = autocorrelation_factor
def __magic_name__ ( self : List[str] ) -> str:
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __magic_name__ ( self : Optional[int] , snake_case_ : List[Any] ) -> Tuple:
'''simple docstring'''
A__ = config.context_length + max(config.lags_sequence )
A__ = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
A__ = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
A__ = floats_tensor([self.batch_size, _past_length] )
A__ = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
A__ = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
A__ = floats_tensor([self.batch_size, config.prediction_length] )
A__ = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def __magic_name__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
A__ = self.get_config()
A__ = self.prepare_autoformer_inputs_dict(snake_case_ )
return config, inputs_dict
def __magic_name__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
A__, A__ = self.prepare_config_and_inputs()
return config, inputs_dict
def __magic_name__ ( self : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : Any ) -> List[str]:
'''simple docstring'''
A__ = AutoformerModel(config=snake_case_ ).to(snake_case_ ).eval()
A__ = model(**snake_case_ )
A__ = outputs.encoder_last_hidden_state
A__ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = model.get_encoder()
encoder.save_pretrained(snake_case_ )
A__ = AutoformerEncoder.from_pretrained(snake_case_ ).to(snake_case_ )
A__, A__, A__, A__, A__ = model.create_network_inputs(**snake_case_ )
A__, A__ = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
A__ = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
A__ = encoder(inputs_embeds=snake_case_ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
A__ = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
A__ = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
A__ = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
A__ = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
A__ = model.get_decoder()
decoder.save_pretrained(snake_case_ )
A__ = AutoformerDecoder.from_pretrained(snake_case_ ).to(snake_case_ )
A__ = decoder(
trend=snake_case_ , inputs_embeds=snake_case_ , encoder_hidden_states=snake_case_ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class UpperCAmelCase_ ( A_, A_, unittest.TestCase ):
lowercase__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
lowercase__ = (AutoformerForPrediction,) if is_torch_available() else ()
lowercase__ = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __magic_name__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
A__ = AutoformerModelTester(self )
A__ = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def __magic_name__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
A__, A__ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
A__ = model_class(snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ )
A__, A__ = model_class.from_pretrained(snake_case_ , output_loading_info=snake_case_ )
self.assertEqual(info["missing_keys"] , [] )
def __magic_name__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*snake_case_ )
@unittest.skip(reason="Model has no tokens embeddings" )
def __magic_name__ ( self : str ) -> int:
'''simple docstring'''
pass
def __magic_name__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
A__ = inspect.signature(getattr(snake_case_ , "forward" ) )
# The main input is the name of the argument after `self`
A__ = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , snake_case_ )
def __magic_name__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(snake_case_ )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(snake_case_ )] , snake_case_ )
def __magic_name__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = True
A__ = getattr(self.model_tester , "seq_length" , snake_case_ )
A__ = getattr(self.model_tester , "decoder_seq_length" , snake_case_ )
A__ = getattr(self.model_tester , "encoder_seq_length" , snake_case_ )
A__ = getattr(self.model_tester , "d_model" , snake_case_ )
A__ = getattr(self.model_tester , "num_attention_heads" , snake_case_ )
A__ = d_model // num_attention_heads
for model_class in self.all_model_classes:
A__ = True
A__ = False
A__ = True
A__ = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ = True
A__ = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
A__ = outputs.encoder_attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
A__ = len(snake_case_ )
A__ = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(snake_case_ , snake_case_ )
# decoder attentions
A__ = outputs.decoder_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
A__ = outputs.cross_attentions
self.assertIsInstance(snake_case_ , (list, tuple) )
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
A__ = True
A__ = True
A__ = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
self.assertEqual(out_len + 2 , len(snake_case_ ) )
A__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def __magic_name__ ( self : List[str] ) -> Dict:
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def _SCREAMING_SNAKE_CASE ( lowercase_="train-batch.pt" ) -> Dict:
A__ = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=lowercase_ , repo_type="dataset" )
A__ = torch.load(lowercase_ , map_location=lowercase_ )
return batch
@require_torch
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
def __magic_name__ ( self : List[str] ) -> int:
'''simple docstring'''
A__ = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
A__ = prepare_batch()
with torch.no_grad():
A__ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
A__ = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , snake_case_ )
A__ = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def __magic_name__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
A__ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
A__ = prepare_batch("val-batch.pt" )
with torch.no_grad():
A__ = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
A__ = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , snake_case_ )
A__ = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=snake_case_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def __magic_name__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
A__ = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(snake_case_ )
A__ = prepare_batch("val-batch.pt" )
with torch.no_grad():
A__ = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
A__ = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , snake_case_ )
A__ = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=snake_case_ )
A__ = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , snake_case_ , rtol=1e-1 ) )
| 363 |
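The encoder/decoder standalone check above relies on Autoformer's decomposition layer, which splits a series into a moving-average trend and a seasonal remainder. A minimal 1-D sketch of that idea (my own illustration with `kernel_size` matching the tester's `moving_average=25`, not the model's actual decomposition layer):

import torch

def decompose(series: torch.Tensor, kernel_size: int = 25):
    """Split a (batch, length) series into (seasonal, trend) parts."""
    # Moving average via average pooling; replicate-pad so the length is preserved.
    pad = (kernel_size - 1) // 2
    padded = torch.nn.functional.pad(series.unsqueeze(1), (pad, kernel_size - 1 - pad), mode="replicate")
    trend = torch.nn.functional.avg_pool1d(padded, kernel_size, stride=1).squeeze(1)
    seasonal = series - trend
    return seasonal, trend

x = torch.randn(2, 100)
seasonal, trend = decompose(x)
# The two components reconstruct the input exactly by construction.
assert torch.allclose(seasonal + trend, x, atol=1e-6)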
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
| 230 | 0 |
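The forward pass above reduces to a convex blend of the two transformers' residuals, added back onto the input. The core arithmetic, isolated on toy tensors (`mix_ratio` as in the class default):

import torch

mix_ratio = 0.5
input_states = torch.randn(1, 4, 8)
residual_a = torch.randn(1, 4, 8)  # transformer output minus input, branch 0
residual_b = torch.randn(1, 4, 8)  # transformer output minus input, branch 1

output = residual_a * mix_ratio + residual_b * (1 - mix_ratio) + input_states

# With mix_ratio = 1.0, only branch 0 contributes to the blended residual.
assert torch.allclose(residual_a * 1.0 + residual_b * 0.0 + input_states, residual_a + input_states)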
"""simple docstring"""
import math
def sieve(n):
    """Segmented sieve of Eratosthenes: return all primes up to and including n."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []

    # classic sieve on the first segment [2, sqrt(n)]
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    # sieve the remaining range in segments of size ~sqrt(n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # find the smallest multiple of `each` inside [low, high]
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)

    return prime


print(sieve(10**6))
 | 86 |
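A quick way to sanity-check the segmented sieve above is to compare it against a naive trial-division filter on a small range:

def is_prime(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))

# Both approaches must agree on every prime up to 100.
assert sieve(100) == [n for n in range(2, 101) if is_prime(n)]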
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}


class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 240 | 0 |
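A short usage sketch for the config class above, relying only on standard `PretrainedConfig` behaviour from the public transformers API:

from transformers import MobileNetV2Config

# Width multiplier 1.4 at 224x224, as in the "mobilenet_v2_1.4_224" checkpoints.
config = MobileNetV2Config(depth_multiplier=1.4, image_size=224)
print(config.hidden_act)  # "relu6"
config.save_pretrained("./mobilenet_v2_config")  # writes config.json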
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count shown by Google Scholar for the given query."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
_UpperCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 3_0,
'pages': '3979-3990',
'year': 2_0_1_8,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 365 |
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors for a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check the equality of all elements (an empty list counts as equal)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first n consecutive integers with n distinct prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first group found by run(n)."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
| 328 | 0 |
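For context on what `run` finds: 644 = 2²·7·23, 645 = 3·5·43 and 646 = 2·17·19 are the first three consecutive integers with three distinct prime factors each, so with the functions above:

assert unique_prime_factors(644) == {2, 7, 23}
assert unique_prime_factors(645) == {3, 5, 43}
assert unique_prime_factors(646) == {2, 17, 19}
assert run(3) == [644, 645, 646]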
"""simple docstring"""
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h_n = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
| 247 |
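The n·(2n − 1) closed form used above follows from summing the arithmetic sequence 1, 5, 9, …: the sum of 4k + 1 for k = 0 … n−1 is 2n² − n = n(2n − 1). A one-line check:

assert all(n * (2 * n - 1) == sum(4 * k + 1 for k in range(n)) for n in range(1, 50))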
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 247 | 1 |
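For quick experiments without an on-disk kernel tree, `torch.utils.cpp_extension.load_inline` JIT-compiles sources passed as strings. A minimal CPU-only sketch (the extension name and function body are illustrative; compiling requires a working C++ toolchain):

from torch.utils.cpp_extension import load_inline

cpp_source = """
#include <torch/extension.h>
torch::Tensor add_one(torch::Tensor x) { return x + 1; }
"""

# Compiles the extension and exposes the listed functions as a Python module.
ext = load_inline(name="toy_ext", cpp_sources=cpp_source, functions=["add_one"])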
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class lowercase_ ( unittest.TestCase ):
def __init__( self , __UpperCamelCase , __UpperCamelCase=1_3 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=9_9 , __UpperCamelCase=3_2 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=3_7 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=5_1_2 , __UpperCamelCase=1_6 , __UpperCamelCase=2 , __UpperCamelCase=0.02 , __UpperCamelCase=4 , ):
"""simple docstring"""
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_attention_mask
UpperCamelCase_ = use_token_type_ids
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_choices
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_attention_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ = None
if self.use_token_type_ids:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase_ = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = True
UpperCamelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class lowercase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
A__ : str = True
A__ : Any = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCamelCase_ = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=__UpperCamelCase )
UpperCamelCase_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCamelCase )
@require_flax
class lowercase_ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=__UpperCamelCase )
UpperCamelCase_ = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
UpperCamelCase_ = model(__UpperCamelCase )[0]
UpperCamelCase_ = [1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , __UpperCamelCase )
# compare the actual values for a slice.
UpperCamelCase_ = np.array(
[[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 ) )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=__UpperCamelCase )
UpperCamelCase_ = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
UpperCamelCase_ = model(__UpperCamelCase )[0]
# compare the actual values for a slice.
UpperCamelCase_ = np.array(
[[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 ) )
| 261 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def lowerCamelCase__ ( a__ : Dataset , a__ : Dict[str, str] ) -> int:
UpperCamelCase_ = args.log_outputs
UpperCamelCase_ = """_""".join(args.dataset.split("""/""" ) + [args.config, args.split] )
# load metric
UpperCamelCase_ = load_metric("""wer""" )
UpperCamelCase_ = load_metric("""cer""" )
# compute metrics
UpperCamelCase_ = wer.compute(references=result["""target"""] , predictions=result["""prediction"""] )
UpperCamelCase_ = cer.compute(references=result["""target"""] , predictions=result["""prediction"""] )
# print & log results
UpperCamelCase_ = f'''WER: {wer_result}\nCER: {cer_result}'''
print(a__ )
with open(f'''{dataset_id}_eval_results.txt''' , """w""" ) as f:
f.write(a__ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
UpperCamelCase_ = f'''log_{dataset_id}_predictions.txt'''
UpperCamelCase_ = f'''log_{dataset_id}_targets.txt'''
with open(a__ , """w""" ) as p, open(a__ , """w""" ) as t:
# mapping function to write output
def write_to_file(a__ : List[str] , a__ : Any ):
p.write(f'''{i}''' + """\n""" )
p.write(batch["""prediction"""] + """\n""" )
t.write(f'''{i}''' + """\n""" )
t.write(batch["""target"""] + """\n""" )
result.map(a__ , with_indices=a__ )
def lowerCamelCase__ ( a__ : str ) -> str:
UpperCamelCase_ = """[,?.!\-\;\:\"“%‘”�—’…–]""" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
UpperCamelCase_ = re.sub(a__ , """""" , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
UpperCamelCase_ = ["""\n\n""", """\n""", """ """, """ """]
for t in token_sequences_to_ignore:
UpperCamelCase_ = """ """.join(text.split(a__ ) )
return text
def lowerCamelCase__ ( a__ : Optional[int] ) -> Union[str, Any]:
# load dataset
UpperCamelCase_ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=a__ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
UpperCamelCase_ = AutoFeatureExtractor.from_pretrained(args.model_id )
UpperCamelCase_ = feature_extractor.sampling_rate
# resample audio
UpperCamelCase_ = dataset.cast_column("""audio""" , Audio(sampling_rate=a__ ) )
# load eval pipeline
if args.device is None:
UpperCamelCase_ = 0 if torch.cuda.is_available() else -1
UpperCamelCase_ = pipeline("""automatic-speech-recognition""" , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(a__ : Optional[Any] ):
UpperCamelCase_ = asr(
batch["""audio"""]["""array"""] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
UpperCamelCase_ = prediction["""text"""]
UpperCamelCase_ = normalize_text(batch["""sentence"""] )
return batch
# run inference on all examples
UpperCamelCase_ = dataset.map(a__ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(a__ , a__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
_A = parser.parse_args()
main(args)
| 261 | 1 |
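The evaluation script above loads its metrics with `datasets.load_metric`; on toy strings the word error rate is simply edit operations over reference words (a sketch; newer datasets versions move these metrics to the `evaluate` package):

from datasets import load_metric

wer = load_metric("wer")
# One substitution out of two reference words -> WER = 0.5.
score = wer.compute(references=["hello world"], predictions=["hello word"])
print(score)  # 0.5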
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = XGLMTokenizer
UpperCamelCase = XGLMTokenizerFast
UpperCamelCase = True
UpperCamelCase = True
def snake_case_( self ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
_SCREAMING_SNAKE_CASE = XGLMTokenizer(A , keep_accents=A )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case_( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = """<pad>"""
_SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def snake_case_( self ) -> Dict:
_SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(A ) , 1008 )
def snake_case_( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def snake_case_( self ) -> Any:
_SCREAMING_SNAKE_CASE = XGLMTokenizer(A , keep_accents=A )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def snake_case_( self ) -> str:
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def snake_case_( self ) -> int:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(A , f.name )
_SCREAMING_SNAKE_CASE = XGLMTokenizer(f.name , keep_accents=A )
_SCREAMING_SNAKE_CASE = pickle.dumps(A )
pickle.loads(A )
def snake_case_( self ) -> List[str]:
if not self.test_rust_tokenizer:
return
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = """I was born in 92000, and this is falsé."""
_SCREAMING_SNAKE_CASE = tokenizer.tokenize(A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = tokenizer.encode(A , add_special_tokens=A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
_SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE = tokenizer.encode(A )
_SCREAMING_SNAKE_CASE = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
@slow
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = """Hello World!"""
_SCREAMING_SNAKE_CASE = [2, 3_1227, 4447, 35]
self.assertListEqual(A , self.big_tokenizer.encode(A ) )
@slow
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
_SCREAMING_SNAKE_CASE = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 7_1630, 2_8085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 1_3675, 377, 652, 7580, 1_0341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 20_2277, 1_7892, 33, 60, 87, 4, 3234, 157, 61, 2667, 5_2376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(A , self.big_tokenizer.encode(A ) )
@slow
def snake_case_( self ) -> List[str]:
# fmt: off
_SCREAMING_SNAKE_CASE = {
"""input_ids""": [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""facebook/xglm-564M""" , padding=A , )
| 58 |
'''simple docstring'''
import string
def decrypt(message: str) -> None:
    """Brute-force every Caesar shift and print each candidate plaintext."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 349 | 0 |
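A matching encoder makes the brute-force decoder above testable end to end (a small sketch, uppercase-only like the decoder):

import string

def encrypt(message: str, key: int) -> str:
    out = ""
    for symbol in message.upper():
        if symbol in string.ascii_uppercase:
            # shift forward by `key`, wrapping around the alphabet
            out += string.ascii_uppercase[(string.ascii_uppercase.find(symbol) + key) % 26]
        else:
            out += symbol
    return out

# decrypt(encrypt("HELLO WORLD", 7)) prints the plaintext on the "Key #7" line.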
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
__magic_name__: List[str] = VOCAB_FILES_NAMES
__magic_name__: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__: Any = PRETRAINED_VOCAB_FILES_MAP
__magic_name__: Union[str, Any] = ["input_ids", "attention_mask"]
__magic_name__: List[int] = []
__magic_name__: List[int] = []
def __init__( self : Dict , _A : Optional[int] , _A : List[str]="<s>" , _A : Optional[int]="</s>" , _A : List[str]="</s>" , _A : Any="<s>" , _A : Optional[int]="<unk>" , _A : Union[str, Any]="<pad>" , _A : Any="<mask>" , _A : str=None , _A : Union[str, Any]=None , _A : str=None , _A : Optional[Dict[str, Any]] = None , _A : Dict=None , **_A : Union[str, Any] , ) -> Any:
"""simple docstring"""
snake_case_ : Tuple = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
snake_case_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , tokenizer_file=_A , src_lang=_A , tgt_lang=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
snake_case_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_A ) )
snake_case_ : int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
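# Example: "," is piece id 3 in the spm model; adding fairseq_offset (= 1 below)
# gives its fairseq id 4, while ids 0-3 stay pinned to <s>/<pad>/</s>/<unk> via
# the dict that follows.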
snake_case_ : Any = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case_ : Optional[int] = 1
snake_case_ : Optional[Any] = len(self.sp_model )
snake_case_ : str = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_A )
}
snake_case_ : List[Any] = {v: k for k, v in self.lang_code_to_id.items()}
snake_case_ : str = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
snake_case_ : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
snake_case_ : List[Any] = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
snake_case_ : List[str] = src_lang if src_lang is not None else 'en_XX'
snake_case_ : Optional[Any] = self.lang_code_to_id[self._src_lang]
snake_case_ : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : int ) -> Tuple:
"""simple docstring"""
snake_case_ : Dict = self.__dict__.copy()
snake_case_ : List[Any] = None
snake_case_ : List[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , _A : str ) -> int:
"""simple docstring"""
snake_case_ : Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
snake_case_ : List[Any] = {}
snake_case_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def UpperCAmelCase_ ( self : Tuple , _A : str ) -> None:
"""simple docstring"""
snake_case_ : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCAmelCase_ ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
snake_case_ : Union[str, Any] = [1] * len(self.prefix_tokens )
snake_case_ : Optional[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_A )) + suffix_ones
return prefix_ones + ([0] * len(_A )) + ([0] * len(_A )) + suffix_ones
def UpperCAmelCase_ ( self : List[Any] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCAmelCase_ ( self : List[Any] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
snake_case_ : Tuple = [self.sep_token_id]
snake_case_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors : str , src_lang : Optional[str] , tgt_lang : Optional[str] , **extra_kwargs ):
        """simple docstring"""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
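        # Sketch of a typical call (hypothetical inputs): _build_translation_inputs('Hello', return_tensors='pt',
        # src_lang='en_XX', tgt_lang='ro_RO') returns a BatchEncoding whose forced_bos_token_id
        # steers generation toward the target language.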
    def get_vocab( self ) -> Dict:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text : str ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token : str ) -> int:
        """simple docstring"""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
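    # Illustration of the offset mapping (hypothetical fairseq_offset = 1): an SP piece id of 5 maps to 6,
    # while an SP id of 0 (sentencepiece's unknown) falls through to unk_token_id.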
    def _convert_id_to_token( self , index : int ) -> str:
        """simple docstring"""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens : List[str] ) -> str:
        """simple docstring"""
        # SPIECE_UNDERLINE is the sentencepiece word-boundary marker ('▁'), assumed to be imported at the module top.
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
        return out_string
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def prepare_seq2seq_batch( self , src_texts : List[str] , src_lang : str = "en_XX" , tgt_texts : Optional[List[str]] = None , tgt_lang : str = "ro_RO" , **kwargs , ) -> BatchEncoding:
        """simple docstring"""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self ):
        """simple docstring"""
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        """simple docstring"""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        """simple docstring"""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
    def set_tgt_lang_special_tokens( self , lang : str ) -> None:
        """simple docstring"""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
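# Illustration (hypothetical ids): with eos_token_id = 2 and lang_code_to_id['en_XX'] = 250004,
# set_src_lang_special_tokens('en_XX') leaves prefix_tokens empty and sets suffix_tokens = [2, 250004],
# so every encoded sequence ends with "... </s> en_XX".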
| 354 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 88 | 0 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case__:
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=1_3 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=9_9 , __lowercase=3_2 , __lowercase=5 , __lowercase=4 , __lowercase=3_7 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_1_2 , __lowercase=1_6 , __lowercase=2 , __lowercase=0.02 , __lowercase=3 , __lowercase=4 , __lowercase=None , ) -> List[Any]:
lowerCAmelCase_ : str = parent
lowerCAmelCase_ : Dict = batch_size
lowerCAmelCase_ : List[str] = seq_length
lowerCAmelCase_ : Optional[int] = is_training
lowerCAmelCase_ : Optional[Any] = use_input_mask
lowerCAmelCase_ : List[Any] = use_token_type_ids
lowerCAmelCase_ : Any = use_labels
lowerCAmelCase_ : Optional[int] = vocab_size
lowerCAmelCase_ : Dict = hidden_size
lowerCAmelCase_ : Tuple = num_hidden_layers
lowerCAmelCase_ : List[str] = num_attention_heads
lowerCAmelCase_ : Any = intermediate_size
lowerCAmelCase_ : str = hidden_act
lowerCAmelCase_ : str = hidden_dropout_prob
lowerCAmelCase_ : List[Any] = attention_probs_dropout_prob
lowerCAmelCase_ : Tuple = max_position_embeddings
lowerCAmelCase_ : str = type_vocab_size
lowerCAmelCase_ : Dict = type_sequence_label_size
lowerCAmelCase_ : List[Any] = initializer_range
lowerCAmelCase_ : List[Any] = num_labels
lowerCAmelCase_ : List[Any] = num_choices
lowerCAmelCase_ : Tuple = scope
def lowercase_ ( self ) -> str:
lowerCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : str = None
if self.use_input_mask:
lowerCAmelCase_ : int = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ : Optional[int] = None
if self.use_token_type_ids:
lowerCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ : List[str] = None
lowerCAmelCase_ : Tuple = None
lowerCAmelCase_ : List[str] = None
if self.use_labels:
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self ) -> Optional[Any]:
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Union[str, Any]:
lowerCAmelCase_ : Optional[int] = NystromformerModel(config=__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : Dict = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
lowerCAmelCase_ : Dict = model(__lowercase , token_type_ids=__lowercase )
lowerCAmelCase_ : Dict = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> str:
lowerCAmelCase_ : List[Any] = NystromformerForMaskedLM(config=__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : Optional[int] = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]:
lowerCAmelCase_ : Optional[Any] = NystromformerForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : List[Any] = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Dict:
lowerCAmelCase_ : Optional[Any] = self.num_labels
lowerCAmelCase_ : List[str] = NystromformerForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : Tuple = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Union[str, Any]:
lowerCAmelCase_ : Tuple = self.num_labels
lowerCAmelCase_ : Optional[int] = NystromformerForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : Optional[int] = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Tuple:
lowerCAmelCase_ : Union[str, Any] = self.num_choices
lowerCAmelCase_ : List[Any] = NystromformerForMultipleChoice(config=__lowercase )
model.to(__lowercase )
model.eval()
lowerCAmelCase_ : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : Optional[Any] = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase_ ( self ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class snake_case__( UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
{
"""feature-extraction""": NystromformerModel,
"""fill-mask""": NystromformerForMaskedLM,
"""question-answering""": NystromformerForQuestionAnswering,
"""text-classification""": NystromformerForSequenceClassification,
"""token-classification""": NystromformerForTokenClassification,
"""zero-shot""": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : int = False
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : Any = NystromformerModelTester(self )
lowerCAmelCase_ : str = ConfigTester(self , config_class=__lowercase , hidden_size=3_7 )
def lowercase_ ( self ) -> List[str]:
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def lowercase_ ( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def lowercase_ ( self ) -> Optional[int]:
lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowercase )
def lowercase_ ( self ) -> List[str]:
lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowercase )
def lowercase_ ( self ) -> Tuple:
lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowercase )
def lowercase_ ( self ) -> Tuple:
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowercase )
def lowercase_ ( self ) -> Optional[Any]:
lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowercase )
@slow
def lowercase_ ( self ) -> Optional[int]:
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Optional[int] = NystromformerModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
@require_torch
class snake_case__( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase_ ( self ) -> str:
        model = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 6, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
def lowercase_ ( self ) -> Union[str, Any]:
        sentence = '''the [MASK] of Belgium is Brussels'''
        tokenizer = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' )
        model = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' )
        encoding = tokenizer(sentence , return_tensors='''pt''' )
with torch.no_grad():
            token_logits = model(encoding.input_ids ).logits
        prediction = token_logits[:, 2, :].argmax(-1 )[0]
        self.assertEqual(tokenizer.decode(prediction ) , '''capital''' ) | 262 |
def solution ( max_perimeter = 10**9 )-> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        # Accumulate the current perimeter, then advance the Pell-like recurrence.
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
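# Sanity sketch (values checked by hand against the recurrence above): the generated perimeters
# are 16, 50, 196, 722, ..., so solution(100) == 16 + 50 == 66.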
if __name__ == "__main__":
print(f"""{solution() = }""") | 262 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 351 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __lowerCamelCase :
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any]=13 , SCREAMING_SNAKE_CASE__ : Optional[Any]=7 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : int=99 , SCREAMING_SNAKE_CASE__ : List[str]=32 , SCREAMING_SNAKE_CASE__ : Tuple=2 , SCREAMING_SNAKE_CASE__ : List[str]=4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=37 , SCREAMING_SNAKE_CASE__ : List[Any]="gelu" , SCREAMING_SNAKE_CASE__ : int=0.1 , SCREAMING_SNAKE_CASE__ : Dict=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=512 , SCREAMING_SNAKE_CASE__ : List[Any]=16 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE__ : Tuple=3 , SCREAMING_SNAKE_CASE__ : int=4 , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : str=0 , ) -> List[Any]:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
lowerCAmelCase__ = projection_dim
def a ( self : Any ) -> Any:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , )
lowerCAmelCase__ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]:
lowerCAmelCase__ = TFDPRContextEncoder(config=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
lowerCAmelCase__ = TFDPRQuestionEncoder(config=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def a ( self : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple ) -> List[Any]:
lowerCAmelCase__ = TFDPRReader(config=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def a ( self : Optional[int] ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
snake_case__ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
snake_case__ = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def a ( self : Dict ) -> int:
lowerCAmelCase__ = TFDPRModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def a ( self : List[Any] ) -> List[Any]:
self.config_tester.run_common_tests()
def a ( self : Tuple ) -> Dict:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*SCREAMING_SNAKE_CASE__ )
def a ( self : Union[str, Any] ) -> Optional[int]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*SCREAMING_SNAKE_CASE__ )
def a ( self : List[Any] ) -> Dict:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*SCREAMING_SNAKE_CASE__ )
@slow
def a ( self : List[str] ) -> Union[str, Any]:
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFDPRContextEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFDPRContextEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFDPRQuestionEncoder.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFDPRReader.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def a ( self : Optional[Any] ) -> List[str]:
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base" )
        input_ids = tf.constant(
            [[101, 7_592, 1_010, 2_003, 2_026, 3_899, 10_140, 1_029, 102]] )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids )[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
0.03_236_253,
0.12_753_335,
0.16_818_509,
0.00_279_786,
0.3_896_933,
0.24_264_945,
0.2_178_971,
-0.02_335_227,
-0.08_481_959,
-0.14_324_117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 221 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a_ : List[Any] = logging.get_logger(__name__)
a_ : str = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class _snake_case ( A__ ):
_lowercase : Any = '''deberta-v2'''
def __init__( self , a=12_8100 , a=1536 , a=24 , a=24 , a=6144 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=0 , a=0.02 , a=1E-7 , a=False , a=-1 , a=0 , a=True , a=None , a=0 , a="gelu" , **a , ) -> List[Any]:
super().__init__(**a)
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = relative_attention
SCREAMING_SNAKE_CASE = max_relative_positions
SCREAMING_SNAKE_CASE = pad_token_id
SCREAMING_SNAKE_CASE = position_biased_input
# Backwards compatibility
if type(a) == str:
SCREAMING_SNAKE_CASE = [x.strip() for x in pos_att_type.lower().split('|')]
SCREAMING_SNAKE_CASE = pos_att_type
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = kwargs.get('pooler_hidden_size' , a)
SCREAMING_SNAKE_CASE = pooler_dropout
SCREAMING_SNAKE_CASE = pooler_hidden_act
class _snake_case ( A__ ):
@property
def SCREAMING_SNAKE_CASE__ ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])
@property
def SCREAMING_SNAKE_CASE__ ( self) -> int:
return 12
def SCREAMING_SNAKE_CASE__ ( self , a , a = -1 , a = -1 , a = -1 , a = False , a = None , a = 3 , a = 40 , a = 40 , a = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE = super().generate_dummy_inputs(preprocessor=a , framework=a)
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
        return dummy_inputs
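        # Net effect (sketch, assuming a config with type_vocab_size == 0): the exported ONNX graph
        # then takes only input_ids and attention_mask, so the dummy token_type_ids are dropped here.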
| 137 |
import os
from datetime import datetime as dt
from github import Github
a_ : Tuple = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/diffusers')
    open_issues = repo.get_issues(state='open')
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i: i.created_at , reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed')
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open')
issue.remove_from_labels('stale')
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.')
issue.add_to_labels('stale')
if __name__ == "__main__":
main()
| 137 | 1 |
'''simple docstring'''
import functools
from typing import Any
def UpperCamelCase ( string : str , words : list ) -> bool:
    '''simple docstring'''
    # Validation
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError('''the string should be a non-empty string''' )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError('''the words should be a list of non-empty strings''' )
    # Build trie
    trie = {}
    word_keeper_key = '''WORD_KEEPER'''
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable(index ) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
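# Minimal usage sketch (hypothetical inputs, using the function name defined above):
# UpperCamelCase('applepenapple', ['apple', 'pen']) -> True
# UpperCamelCase('catsandog', ['cats', 'dog', 'sand', 'and', 'cat']) -> False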
| 98 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _SCREAMING_SNAKE_CASE :
def __init__( self : str , a__ : Union[str, Any] , a__ : Dict=13 , a__ : List[str]=32 , a__ : List[Any]=2 , a__ : List[str]=3 , a__ : Union[str, Any]=16 , a__ : Dict=[1, 2, 1] , a__ : Optional[Any]=[2, 2, 4] , a__ : List[str]=2 , a__ : Optional[Any]=2.0 , a__ : Union[str, Any]=True , a__ : int=0.0 , a__ : int=0.0 , a__ : Tuple=0.1 , a__ : List[str]="gelu" , a__ : str=False , a__ : Optional[int]=True , a__ : List[Any]=0.02 , a__ : Any=1E-5 , a__ : int=True , a__ : List[Any]=None , a__ : Dict=True , a__ : Optional[int]=10 , a__ : Any=8 , ):
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = image_size
__magic_name__ = patch_size
__magic_name__ = num_channels
__magic_name__ = embed_dim
__magic_name__ = depths
__magic_name__ = num_heads
__magic_name__ = window_size
__magic_name__ = mlp_ratio
__magic_name__ = qkv_bias
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = drop_path_rate
__magic_name__ = hidden_act
__magic_name__ = use_absolute_embeddings
__magic_name__ = patch_norm
__magic_name__ = layer_norm_eps
__magic_name__ = initializer_range
__magic_name__ = is_training
__magic_name__ = scope
__magic_name__ = use_labels
__magic_name__ = type_sequence_label_size
__magic_name__ = encoder_stride
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = SwinvaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def snake_case__ ( self : Optional[Any] , a__ : Optional[Any] , a__ : str , a__ : int ):
__magic_name__ = SwinvaForMaskedImageModeling(config=a__ )
model.to(a__ )
model.eval()
__magic_name__ = model(a__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__magic_name__ = 1
__magic_name__ = SwinvaForMaskedImageModeling(a__ )
model.to(a__ )
model.eval()
__magic_name__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def snake_case__ ( self : List[str] , a__ : List[str] , a__ : List[Any] , a__ : Any ):
__magic_name__ = self.type_sequence_label_size
__magic_name__ = SwinvaForImageClassification(a__ )
model.to(a__ )
model.eval()
__magic_name__ = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __a ,__a ,unittest.TestCase ):
__SCREAMING_SNAKE_CASE :int = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__SCREAMING_SNAKE_CASE :Tuple = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE :Union[str, Any] = False
__SCREAMING_SNAKE_CASE :List[Any] = False
__SCREAMING_SNAKE_CASE :Dict = False
__SCREAMING_SNAKE_CASE :Union[str, Any] = False
def snake_case__ ( self : str ):
__magic_name__ = SwinvaModelTester(self )
__magic_name__ = ConfigTester(self , config_class=a__ , embed_dim=37 )
def snake_case__ ( self : Tuple ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : List[Any] ):
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def snake_case__ ( self : str ):
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def snake_case__ ( self : Union[str, Any] ):
pass
def snake_case__ ( self : Optional[int] ):
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def snake_case__ ( self : Union[str, Any] ):
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ = model_class(a__ )
__magic_name__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ = [*signature.parameters.keys()]
__magic_name__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def snake_case__ ( self : int ):
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = True
for model_class in self.all_model_classes:
__magic_name__ = True
__magic_name__ = False
__magic_name__ = True
__magic_name__ = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
__magic_name__ = model(**self._prepare_for_class(a__ , a__ ) )
__magic_name__ = outputs.attentions
__magic_name__ = len(self.model_tester.depths )
self.assertEqual(len(a__ ) , a__ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__magic_name__ = True
            window_size_squared = config.window_size**2
__magic_name__ = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
__magic_name__ = model(**self._prepare_for_class(a__ , a__ ) )
__magic_name__ = outputs.attentions
self.assertEqual(len(a__ ) , a__ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
            out_len = len(outputs )
# Check attention is always last and order is fine
__magic_name__ = True
__magic_name__ = True
__magic_name__ = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
__magic_name__ = model(**self._prepare_for_class(a__ , a__ ) )
            if hasattr(self.model_tester , '''num_hidden_states_types''' ):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
self.assertEqual(out_len + added_hidden_states , len(a__ ) )
__magic_name__ = outputs.attentions
self.assertEqual(len(a__ ) , a__ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ) , expected_num_layers )
        batch_size , num_channels , height , width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def snake_case__ ( self : List[Any] ):
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__magic_name__ = True
self.check_hidden_states_output(a__ , a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ = True
self.check_hidden_states_output(a__ , a__ , a__ , a__ )
def snake_case__ ( self : Optional[Any] ):
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = 3
__magic_name__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__magic_name__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__magic_name__ = True
self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ = True
self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
def snake_case__ ( self : str ):
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a__ )
def snake_case__ ( self : Dict ):
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def snake_case__ ( self : Any ):
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ = SwinvaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def snake_case__ ( self : List[str] ):
__magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ = _config_zero_init(a__ )
for model_class in self.all_model_classes:
__magic_name__ = model_class(config=a__ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def snake_case__ ( self : Optional[Any] ):
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def snake_case__ ( self : Optional[int] ):
        model = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 98 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Any = """bert"""
def __init__( self , __magic_name__=3_0_5_2_2 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_2 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=0 , __magic_name__="absolute" , __magic_name__=True , __magic_name__=None , **__magic_name__ , ):
super().__init__(pad_token_id=__magic_name__ , **__magic_name__ )
lowerCamelCase : List[str] = vocab_size
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Optional[int] = num_hidden_layers
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : str = hidden_act
lowerCamelCase : int = intermediate_size
lowerCamelCase : Any = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Any = type_vocab_size
lowerCamelCase : Tuple = initializer_range
lowerCamelCase : Dict = layer_norm_eps
lowerCamelCase : Tuple = position_embedding_type
lowerCamelCase : Optional[Any] = use_cache
lowerCamelCase : int = classifier_dropout
class A__ ( __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
if self.task == "multiple-choice":
lowerCamelCase : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase : List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
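        # For any task other than multiple-choice this resolves to (sketch):
        # {'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': {0: 'batch', 1: 'sequence'},
        #  'token_type_ids': {0: 'batch', 1: 'sequence'}}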
| 287 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class A__ ( __SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Tuple = """camembert"""
def __init__( self , __magic_name__=3_0_5_2_2 , __magic_name__=7_6_8 , __magic_name__=1_2 , __magic_name__=1_2 , __magic_name__=3_0_7_2 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_1_2 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__="absolute" , __magic_name__=True , __magic_name__=None , **__magic_name__ , ):
super().__init__(pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
lowerCamelCase : int = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : int = num_attention_heads
lowerCamelCase : Optional[int] = hidden_act
lowerCamelCase : List[Any] = intermediate_size
lowerCamelCase : Tuple = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = max_position_embeddings
lowerCamelCase : str = type_vocab_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : int = layer_norm_eps
lowerCamelCase : Any = position_embedding_type
lowerCamelCase : Optional[int] = use_cache
lowerCamelCase : Union[str, Any] = classifier_dropout
class A__ ( __SCREAMING_SNAKE_CASE):
@property
def UpperCamelCase__ ( self ):
if self.task == "multiple-choice":
lowerCamelCase : List[str] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase : List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 287 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
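# The sys.modules swap above is the standard transformers lazy-import pattern: the names listed in
# _import_structure are only imported on first attribute access, so importing this package stays
# cheap when torch is not installed.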
| 356 |
def catalan_numbers ( upper_limit : int ) -> "list[int]":
    '''simple docstring'''
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
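# Sanity sketch: the first Catalan numbers are 1, 1, 2, 5, 14, 42,
# so catalan_numbers(5) should return [1, 1, 2, 5, 14, 42].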
if __name__ == "__main__":
print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
print("\n*** Enter -1 at any time to quit ***")
print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
try:
while True:
UpperCAmelCase__ = int(input().strip())
if N < 0:
print("\n********* Goodbye!! ************")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("Try another upper limit for the sequence: ", end="")
except (NameError, ValueError):
print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod()
| 290 | 0 |
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = WavaVecaPhonemeCTCTokenizer
SCREAMING_SNAKE_CASE : List[str] = False
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
super().setUp()
        vocab = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
        ).split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
# check adding a single token
tokenizer.add_tokens('''xxx''' )
__lowercase = tokenizer('''m xxx ɪ''' ,do_phonemize=lowercase__ ).input_ids
self.assertEqual(lowercase__ ,[1_3, 3_9_2, 1_7] ) # xxx should be last token
tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''] )
__lowercase = tokenizer('''m aaa ɪ ccc''' ,do_phonemize=lowercase__ ).input_ids
self.assertEqual(lowercase__ ,[1_3, 3_9_3, 1_7, 3_9_5] ) # aaa and ccc should be after xxx and 2 after aaa
__lowercase = tokenizer('''maɪ c''' ,do_phonemize=lowercase__ ).input_ids
self.assertEqual(lowercase__ ,[3, 2_0_0] ) # mai should be <unk> (=3)
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
__lowercase = '''Hello how are you'''
__lowercase = tokenizer.phonemize(lowercase__ ,phonemizer_lang='''en-us''' )
self.assertEqual(lowercase__ ,'''h ə l oʊ h aʊ ɑːɹ j uː''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
__lowercase = '''Hello how are you'''
__lowercase = tokenizer.phonemize(lowercase__ ,phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(lowercase__ ).input_ids ,tokenizer(lowercase__ ,do_phonemize=lowercase__ ).input_ids )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
__lowercase = '''Hello how are you'''
__lowercase = tokenizer.phonemize(lowercase__ ,phonemizer_lang='''en-us''' )
__lowercase = tokenizer.decode(tokenizer(lowercase__ ).input_ids )
self.assertEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
__lowercase = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8],
[2_4, 2_2, 5, 2_4, 2_2, 5, 7_7],
]
__lowercase = tokenizer.decode(sample_ids[0] )
__lowercase = tokenizer.batch_decode(lowercase__ )
self.assertEqual(lowercase__ ,batch_tokens[0] )
self.assertEqual(lowercase__ ,['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' ,word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
__lowercase = '''Hello how are you'''
__lowercase = tokenizer.phonemize(lowercase__ ,phonemizer_lang='''en-us''' )
self.assertEqual(lowercase__ ,'''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' ,word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
__lowercase = '''Hello how are you'''
__lowercase = tokenizer.phonemize(lowercase__ ,phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(lowercase__ ).input_ids ,tokenizer(lowercase__ ,do_phonemize=lowercase__ ).input_ids )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' ,word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
__lowercase = [
[1_1, 5, 1_5, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 1_5, 8, tokenizer.word_delimiter_token_id, 9_8],
[tokenizer.word_delimiter_token_id, 2_4, 2_2, tokenizer.word_delimiter_token_id, 5, 2_4, 2_2, 5, 7_7],
]
# fmt: on
# decode with word_del_token filter
__lowercase = tokenizer.decode(sample_ids[0] )
__lowercase = tokenizer.batch_decode(lowercase__ )
self.assertEqual(lowercase__ ,batch_tokens[0] )
self.assertEqual(lowercase__ ,['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
__lowercase = tokenizer.decode(sample_ids[0] ,filter_word_delimiter_token=lowercase__ )
__lowercase = tokenizer.batch_decode(lowercase__ ,filter_word_delimiter_token=lowercase__ )
self.assertEqual(lowercase__ ,batch_tokens[0] )
self.assertEqual(lowercase__ ,['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' ,word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
__lowercase = '''Hello how are you'''
__lowercase = tokenizer.phonemize(lowercase__ ,phonemizer_lang='''en-us''' )
__lowercase = tokenizer.decode(tokenizer(lowercase__ ).input_ids ,filter_word_delimiter_token=lowercase__ )
self.assertEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' ,word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
__lowercase = '''Hello how are you'''
__lowercase = tokenizer.phonemize(lowercase__ ,phonemizer_lang='''en-us''' )
__lowercase = tokenizer.decode(tokenizer(lowercase__ ).input_ids ,filter_word_delimiter_token=lowercase__ )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' ,word_delimiter_token=lowercase__ )
__lowercase = '''Hello how are you'''
__lowercase = tokenizer(lowercase__ ,phonemizer_lang='''en-us''' ).input_ids
__lowercase = tokenizer(lowercase__ ,phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(lowercase__ ,lowercase__ )
__lowercase = tokenizer.decode(lowercase__ )
__lowercase = tokenizer.decode(lowercase__ )
self.assertEqual(lowercase__ ,'''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(lowercase__ ,'''ɛ l o h aʊ a ʁ j u''' )
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
__lowercase = '''Hello how Are you'''
__lowercase = '''hello how are you'''
__lowercase = tokenizer(lowercase__ ).input_ids
__lowercase = tokenizer(lowercase__ ).input_ids
self.assertEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
__lowercase = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8, 3_9_2, 3_9_2, 3_9_3, 3_9_2, 3_9_2, 3_9_3, 3_9_4, 3_9_4],
[2_4, 2_2, 5, 2_4, 2_2, 5, 7_7, tokenizer.pad_token_id, 3_9_4, 3_9_4],
]
# fmt: on
__lowercase = tokenizer.batch_decode(lowercase__ )
self.assertEqual(lowercase__ ,['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
__lowercase = [1_1, 5, 5, 5, 1_5, 1_5, tokenizer.pad_token_id, 1_5, 1_5, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 1_5, 8, 8, 8, tokenizer.word_delimiter_token_id, 9_8]
# fmt: on
__lowercase = tokenizer.decode(lowercase__ ,output_char_offsets=lowercase__ ,filter_word_delimiter_token=lowercase__ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) ,2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
self.assertTrue(isinstance(lowercase__ ,lowercase__ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] ,'''char''' ) ) ,outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] ,'''char''' ) ,['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] ,'''start_offset''' ) ,[0, 1, 4, 7, 9, 1_1, 1_2, 1_5, 1_6] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] ,'''end_offset''' ) ,[1, 4, 6, 9, 1_0, 1_2, 1_5, 1_6, 1_7] )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = self.get_tokenizer(word_delimiter_token='''|''' )
def check_list_tuples_equal(lowercase__ : List[Any] ,lowercase__ : Tuple ):
self.assertTrue(isinstance(lowercase__ ,lowercase__ ) )
self.assertTrue(isinstance(outputs_list[0] ,lowercase__ ) )
# transform list to ModelOutput
__lowercase = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['''text'''] ,outputs_batch_a['''text'''] )
def recursive_check(lowercase__ : Dict ,lowercase__ : Tuple ):
if isinstance(lowercase__ ,lowercase__ ):
[recursive_check(lowercase__ ,lowercase__ ) for la, la in zip(lowercase__ ,lowercase__ )]
self.assertEqual(lowercase__ ,lowercase__ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] ,outputs_batch_a['''char_offsets'''] )
# fmt: off
__lowercase = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 4, 8, 9_8, 3_2, 3_2, 3_2, 3_2, 4, 3_3, tokenizer.word_delimiter_token_id, 3_2, 3_2, 3_3, 3_4, 3_4],
[2_4, 2_2, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 2_4, 2_2, 2_2, 2_2, 4, 5, 7_7, tokenizer.pad_token_id, 2_2, 2_2, 4, 3_4, 3_4, 3_4, 3_4],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
__lowercase = tokenizer.batch_decode(lowercase__ ,output_char_offsets=lowercase__ )
__lowercase = [tokenizer.decode(lowercase__ ,output_char_offsets=lowercase__ ) for ids in sample_ids]
check_list_tuples_equal(lowercase__ ,lowercase__ )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def SCREAMING_SNAKE_CASE ( self : str ):
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
pass
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = self.get_tokenizers(do_lower_case=lowercase__ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase__ )
self.assertNotEqual(lowercase__ ,0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__lowercase = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
__lowercase = tokenizer.add_tokens(lowercase__ )
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase__ )
self.assertNotEqual(lowercase__ ,0 )
self.assertEqual(lowercase__ ,lowercase__ )
self.assertEqual(lowercase__ ,len(lowercase__ ) )
self.assertEqual(lowercase__ ,all_size + len(lowercase__ ) )
__lowercase = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' ,add_special_tokens=lowercase__ )
self.assertGreaterEqual(len(lowercase__ ) ,4 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
__lowercase = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
__lowercase = tokenizer.add_special_tokens(lowercase__ )
__lowercase = tokenizer.vocab_size
__lowercase = len(lowercase__ )
self.assertNotEqual(lowercase__ ,0 )
self.assertEqual(lowercase__ ,lowercase__ )
self.assertEqual(lowercase__ ,len(lowercase__ ) )
self.assertEqual(lowercase__ ,all_size_a + len(lowercase__ ) )
__lowercase = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' ,add_special_tokens=lowercase__ )
self.assertGreaterEqual(len(lowercase__ ) ,6 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] ,tokens[1] )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokens[-4] )
self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def SCREAMING_SNAKE_CASE ( self : Dict ):
pass
def SCREAMING_SNAKE_CASE ( self : int ):
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
__lowercase = self.get_tokenizers(fast=lowercase__ ,do_lower_case=lowercase__ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
__lowercase = tokenizer.convert_tokens_to_string(lowercase__ )
self.assertIsInstance(output['''text'''] ,lowercase__ )
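# Standalone sketch of the CTC-style collapsing that `decode` performs in the
# tests above: consecutive duplicate ids merge, and pad ids are dropped (note
# how a pad between two identical ids keeps both, as in the offset tests).
# Pure-Python illustration, not the tokenizer's actual implementation.
def _ctc_collapse(ids, pad_id):
    out, prev = [], None
    for i in ids:
        if i != prev and i != pad_id:
            out.append(i)
        prev = i
    return out


assert _ctc_collapse([11, 5, 5, 5, 15, 15, 0, 15], pad_id=0) == [11, 5, 15, 15]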
| 104 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowercase_ (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
pass
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase__ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ ,nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase__ )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Optional[int] ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
__lowercase = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
__lowercase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__lowercase = torch.from_numpy(lowercase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__lowercase = pt_noise
super().check_pt_tf_models(lowercase__ ,lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
__lowercase = outputs[0].cpu().numpy()
__lowercase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase__ )
__lowercase = model_class.from_pretrained(lowercase__ )
model.to(lowercase__ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
__lowercase = model(**self._prepare_for_class(lowercase__ ,lowercase__ ) )
# Make sure we don't have nans
__lowercase = after_outputs[0].cpu().numpy()
__lowercase = 0
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowercase__ ,1e-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def SCREAMING_SNAKE_CASE ( self : int ):
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
pass
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = ViTMAEModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
def _A ( ):
"""simple docstring"""
__lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE ( self : str ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self : str ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
__lowercase = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(lowercase__ )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=lowercase__ ,return_tensors='''pt''' ).to(lowercase__ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__lowercase = ViTMAEConfig()
__lowercase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__lowercase = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
__lowercase = model(**lowercase__ ,noise=torch.from_numpy(lowercase__ ).to(device=lowercase__ ) )
# verify the logits
__lowercase = torch.Size((1, 1_9_6, 7_6_8) )
self.assertEqual(outputs.logits.shape ,lowercase__ )
__lowercase = torch.tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(lowercase__ ) ,atol=1e-4 ) )
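# Sketch of the sequence-length arithmetic the tester above relies on: with a
# 30x30 image, 2x2 patches and mask_ratio 0.6, the encoder only sees
# ceil((1 - 0.6) * (225 + 1)) = 91 tokens (kept patches plus [CLS]).
_num_patches = (30 // 2) ** 2  # 225 patches
assert int(math.ceil((1 - 0.6) * (_num_patches + 1))) == 91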
| 104 | 1 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an XZ file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdCompressedFileFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a Zstandard file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(self, fo: str, mode: str = "rb", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, block_size: int = DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(
            fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
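# Minimal usage sketch for the filesystems above, assuming they are registered
# with fsspec (datasets registers them at import time); paths are illustrative.
#
#   import fsspec
#   with fsspec.open("gzip://file.txt::/tmp/file.txt.gz", "rt") as f:
#       text = f.read()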
| 213 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve over a set of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x_points = [i[0] for i in self.list_of_points]
        y_points = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x, to_plot_y, color='blue', label='Curve of Degree ' + str(self.degree), )
        plt.scatter(x_points, y_points, color='red', label='Control Points')
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
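    # Worked check (a sketch): the quadratic curve through (0, 0), (5, 5), (5, 0)
    # passes through (3.75, 2.5) at t = 0.5, since B(0.5) = P0/4 + P1/2 + P2/4.
    assert BezierCurve([(0, 0), (5, 5), (5, 0)]).bezier_curve_function(0.5) == (3.75, 2.5)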
| 213 | 1 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs, )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory )
        return dataset
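# Hedged usage sketch for the reader above; the corpus path is illustrative.
#
#   ds = TextDatasetReader("my_corpus.txt", split="train").read()
#   print(ds[0]["text"])  # each line of the file becomes one example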
| 9 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
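# Why the TYPE_CHECKING branch above exists: type checkers see the real imports
# while runtime users pay only for what they touch. The same idiom in
# miniature (names here are illustrative):
#
#   from typing import TYPE_CHECKING
#
#   if TYPE_CHECKING:
#       from .heavy_module import HeavyClass  # static analysis only
#
#   def make() -> "HeavyClass":  # forward reference stays a string at runtime
#       from .heavy_module import HeavyClass
#       return HeavyClass()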
| 9 | 1 |
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
def snake_case__ ( self):
return TaConfig.from_pretrained("""google/umt5-base""")
    def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size)
UpperCAmelCase__ : int = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size)
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
UpperCAmelCase__ : Optional[Any] = input_ids.clamp(self.pad_token_id + 1)
UpperCAmelCase__ : Union[str, Any] = decoder_input_ids.clamp(self.pad_token_id + 1)
UpperCAmelCase__ : Any = self.get_config()
UpperCAmelCase__ : List[Any] = config.num_attention_heads
UpperCAmelCase__ : Dict = self.prepare_inputs_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
return config, input_dict
def snake_case__ ( self):
UpperCAmelCase__ , UpperCAmelCase__ : int = self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case__ ( self):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def snake_case__ ( self):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
UpperCAmelCase__ : Dict = UMTaModel(config=_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
UpperCAmelCase__ : Dict = model(
input_ids=_lowerCamelCase , decoder_input_ids=_lowerCamelCase , attention_mask=_lowerCamelCase , decoder_attention_mask=_lowerCamelCase , )
UpperCAmelCase__ : List[str] = model(input_ids=_lowerCamelCase , decoder_input_ids=_lowerCamelCase)
UpperCAmelCase__ : Tuple = result.last_hidden_state
UpperCAmelCase__ : List[str] = result.past_key_values
UpperCAmelCase__ : List[str] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size))
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size))
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(_lowerCamelCase) , config.num_layers)
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0]) , 4)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
UpperCAmelCase__ : List[Any] = UMTaModel(config=_lowerCamelCase).get_decoder().to(_lowerCamelCase).eval()
# first forward pass
UpperCAmelCase__ : Dict = model(_lowerCamelCase , use_cache=_lowerCamelCase)
UpperCAmelCase__ : str = model(_lowerCamelCase)
UpperCAmelCase__ : str = model(_lowerCamelCase , use_cache=_lowerCamelCase)
self.parent.assertTrue(len(_lowerCamelCase) == len(_lowerCamelCase))
self.parent.assertTrue(len(_lowerCamelCase) == len(_lowerCamelCase) + 1)
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase__ : Any = ids_tensor((self.batch_size, 1) , config.vocab_size)
# append to next input_ids and
UpperCAmelCase__ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase)["""last_hidden_state"""]
UpperCAmelCase__ : int = model(_lowerCamelCase , past_key_values=_lowerCamelCase)["""last_hidden_state"""]
# select random slice
UpperCAmelCase__ : Any = ids_tensor((1,) , output_from_past.shape[-1]).item()
UpperCAmelCase__ : Optional[int] = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCAmelCase__ : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , ):
UpperCAmelCase__ : str = UMTaModel(config=_lowerCamelCase).to(_lowerCamelCase).half().eval()
UpperCAmelCase__ : Dict = model(**_lowerCamelCase)["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(_lowerCamelCase).any().item())
@require_torch
class _snake_case ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': UMTaForConditionalGeneration,
            'feature-extraction': UMTaModel,
            'summarization': UMTaForConditionalGeneration,
            'text2text-generation': UMTaForConditionalGeneration,
            'translation': UMTaForConditionalGeneration,
            'question-answering': UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMTaModelTester(self)
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""")
def snake_case__ ( self):
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase__ : Optional[Any] = UMTaModel(config_and_inputs[0]).to(_lowerCamelCase)
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
_lowerCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=_lowerCamelCase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""")
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase__ : List[str] = config_and_inputs[0]
UpperCAmelCase__ : str = UMTaForConditionalGeneration(_lowerCamelCase).eval()
model.to(_lowerCamelCase)
UpperCAmelCase__ : Dict = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=_lowerCamelCase),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=_lowerCamelCase),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=_lowerCamelCase),
}
for attn_name, (name, mask) in zip(_lowerCamelCase , head_masking.items()):
UpperCAmelCase__ : List[Any] = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
UpperCAmelCase__ : Any = torch.ones(
config.num_decoder_layers , config.num_heads , device=_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=_lowerCamelCase , return_dict_in_generate=_lowerCamelCase , **_lowerCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
UpperCAmelCase__ : Tuple = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights]) , 0.0)
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""")
def snake_case__ ( self):
pass
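# Standalone sketch of the property the head-masking test above asserts: a
# zeroed per-head gate must zero the corresponding attention weights. Shapes
# are illustrative, and this helper is not wired into the suite.
def _check_head_mask_zeroes_attention():
    attn = torch.rand(2, 4, 7, 7)  # (batch, heads, query, key)
    head_mask = torch.zeros(4)  # one scalar gate per head
    masked = attn * head_mask.view(1, -1, 1, 1)
    assert masked.sum().item() == 0.0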
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""")
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=_lowerCamelCase).to(_lowerCamelCase)
UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=_lowerCamelCase , legacy=_lowerCamelCase)
UpperCAmelCase__ : Tuple = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
UpperCAmelCase__ : str = tokenizer(_lowerCamelCase , return_tensors="""pt""" , padding=_lowerCamelCase).input_ids
# fmt: off
UpperCAmelCase__ : Optional[int] = torch.tensor(
[
        [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
        [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
])
# fmt: on
torch.testing.assert_allclose(_lowerCamelCase , _lowerCamelCase)
UpperCAmelCase__ : List[str] = model.generate(input_ids.to(_lowerCamelCase))
UpperCAmelCase__ : Dict = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
UpperCAmelCase__ : Optional[Any] = tokenizer.batch_decode(_lowerCamelCase)
self.assertEqual(_lowerCamelCase , _lowerCamelCase) | 283 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_m2m_100'] = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
UpperCamelCase = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
UpperCamelCase = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder('>')
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, 'Please use tf.one_hot on tensors.')
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
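# Worked example of the flat-index trick in `_dense_to_one_hot` above: offsets
# [0, 3, 6] plus labels [1, 0, 2] hit flat positions [1, 3, 8], i.e. exactly
# one 1 per row, at each label's column.
#
#   _dense_to_one_hot(numpy.array([1, 0, 2]), 3)
#   -> [[0., 1., 0.],
#       [1., 0., 0.],
#       [0., 0., 1.]]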
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class lowerCAmelCase_ :
'''simple docstring'''
    @deprecated(
        None,
        'Please use alternatives such as official/mnist/_DataSet.py'
        ' from tensorflow/models.',
    )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None):
        """Construct a _DataSet."""
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
            end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
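# Worked example of the wrap-around above, assuming num_examples=100:
# with start=90 and batch_size=32, the caller receives the final 10 examples
# of the finished epoch concatenated with the first 22 of the reshuffled one.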
@deprecated(None, '''Please write your own downloading logic.''')
def _maybe_download(filename, work_directory, source_url):
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print('''Successfully downloaded''', filename, size, '''bytes.''')
    return filepath
@deprecated(
    None, '''Please use alternatives such as:''' ''' tensorflow_datasets.load('mnist')''')
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, validation_size=5000, seed=None, source_url=DEFAULT_SOURCE_URL, ):
    if fake_data:
        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = '''train-images-idx3-ubyte.gz'''
    train_labels_file = '''train-labels-idx1-ubyte.gz'''
    test_images_file = '''t10k-images-idx3-ubyte.gz'''
    test_labels_file = '''t10k-labels-idx1-ubyte.gz'''
    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, '''rb''') as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, '''rb''') as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, '''rb''') as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, '''rb''') as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            '''Validation size should be between 0 and '''
            F"""{len(train_images)}. Received: {validation_size}."""
        )
        raise ValueError(msg)
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
| 319 |
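For context, the one-hot conversion above leans on numpy's flat indexing. A self-contained sketch of the same trick with hypothetical sample labels (plain numpy, no TensorFlow required):

import numpy

def dense_to_one_hot(labels_dense, num_classes):
    # Offset of each row's start inside the flattened (num_labels, num_classes) matrix
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    # Writes a 1 at position (i, labels_dense[i]) for every row i
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot

labels = numpy.array([0, 2, 1], dtype=numpy.uint8)
print(dense_to_one_hot(labels, 3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]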
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import Wav2Vec2PhonemeCTCTokenizer
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.models.wav2vec2_phoneme.tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class Wav2Vec2PhonemeCTCTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Wav2Vec2PhonemeCTCTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab = (
'''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
'''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
'''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
'''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
'''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
'''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
'''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
'''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
'''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
'''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
'''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
'''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
'''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
).split(''' ''' )
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ''' '''
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ''' ''' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
        # check adding a single token
        tokenizer.add_tokens('''xxx''')
        token_ids = tokenizer('''m xxx ɪ''', do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token
        tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''])
        token_ids = tokenizer('''m aaa ɪ ccc''', do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa
        token_ids = tokenizer('''maɪ c''', do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)
    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
        input_text = '''Hello how are you'''
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang='''en-us''')
        self.assertEqual(phonemes, '''h ə l oʊ h aʊ ɑːɹ j uː''')
def _snake_case ( self : Tuple ) -> Dict:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: List[Any] = '''Hello how are you'''
A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids )
def _snake_case ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: List[str] = '''Hello how are you'''
A: Union[str, Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: Union[str, Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: Optional[Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
A: List[str] = tokenizer.decode(sample_ids[0] )
A: List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
def _snake_case ( self : Any ) -> Optional[int]:
'''simple docstring'''
A: int = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: List[Any] = '''Hello how are you'''
A: Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''' )
def _snake_case ( self : List[str] ) -> int:
'''simple docstring'''
A: Optional[Any] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Optional[Any] = '''Hello how are you'''
A: Any = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE_ , do_phonemize=SCREAMING_SNAKE_CASE_ ).input_ids )
def _snake_case ( self : Dict ) -> Any:
'''simple docstring'''
A: Optional[int] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
A: str = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
A: Tuple = tokenizer.decode(sample_ids[0] )
A: Optional[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''] )
# decode with no word_del_token filter
A: str = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch_tokens[0] )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''] )
def _snake_case ( self : int ) -> List[str]:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Union[str, Any] = '''Hello how are you'''
A: Tuple = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: Any = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] ) -> Any:
'''simple docstring'''
A: Dict = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
A: Any = '''Hello how are you'''
A: List[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' )
A: List[Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''' )] ).strip() , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
A: List[str] = self.tokenizer_class.from_pretrained(
'''facebook/wav2vec2-lv-60-espeak-cv-ft''' , word_delimiter_token=SCREAMING_SNAKE_CASE_ )
A: List[Any] = '''Hello how are you'''
A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''en-us''' ).input_ids
A: Tuple = tokenizer(SCREAMING_SNAKE_CASE_ , phonemizer_lang='''fr-fr''' ).input_ids
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A: Tuple = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
A: Any = tokenizer.decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''h ə l oʊ h aʊ ɑːɹ j uː''' )
self.assertEqual(SCREAMING_SNAKE_CASE_ , '''ɛ l o h aʊ a ʁ j u''' )
def _snake_case ( self : str ) -> str:
'''simple docstring'''
A: str = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
A: str = '''Hello how Are you'''
A: Union[str, Any] = '''hello how are you'''
A: List[str] = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
A: str = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _snake_case ( self : int ) -> List[Any]:
'''simple docstring'''
A: Union[str, Any] = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''' )
tokenizer.add_tokens(['''!''', '''?'''] )
tokenizer.add_special_tokens({'''cls_token''': '''$$$'''} )
# fmt: off
A: Tuple = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
]
# fmt: on
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''] )
@staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def _snake_case ( self : Any ) -> Tuple:
'''simple docstring'''
A: str = self.get_tokenizer(word_delimiter_token='''|''' )
tokenizer.add_tokens('''|''' )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
A: Union[str, Any] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
A: int = tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ) , 2 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''char_offsets''' in outputs )
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) ) , outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''char''' ) , ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''start_offset''' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs['''char_offsets'''] , '''end_offset''' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def _snake_case ( self : Any ) -> List[Any]:
'''simple docstring'''
A: Optional[int] = self.get_tokenizer(word_delimiter_token='''|''' )
def check_list_tuples_equal(SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertTrue(isinstance(outputs_list[0] , SCREAMING_SNAKE_CASE_ ) )
# transform list to ModelOutput
A: Dict = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch['''text'''] , outputs_batch_a['''text'''] )
def recursive_check(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
[recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for la, la in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )]
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['''char_offsets'''] , outputs_batch_a['''char_offsets'''] )
# fmt: off
A: int = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
A: List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ )
A: List[Any] = [tokenizer.decode(SCREAMING_SNAKE_CASE_ , output_char_offsets=SCREAMING_SNAKE_CASE_ ) for ids in sample_ids]
check_list_tuples_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''' )
def _snake_case ( self : int ) -> int:
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''' )
def _snake_case ( self : str ) -> Any:
'''simple docstring'''
pass
@unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''' )
def _snake_case ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''' )
def _snake_case ( self : Dict ) -> List[Any]:
'''simple docstring'''
pass
def _snake_case ( self : Tuple ) -> Any:
'''simple docstring'''
A: Any = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
A: str = tokenizer.vocab_size
A: str = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
A: List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
A: List[Any] = tokenizer.add_tokens(SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = tokenizer.vocab_size
A: Union[str, Any] = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size + len(SCREAMING_SNAKE_CASE_ ) )
A: Any = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
A: str = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
A: int = tokenizer.add_special_tokens(SCREAMING_SNAKE_CASE_ )
A: Optional[Any] = tokenizer.vocab_size
A: Optional[Any] = len(SCREAMING_SNAKE_CASE_ )
self.assertNotEqual(SCREAMING_SNAKE_CASE_ , 0 )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , all_size_a + len(SCREAMING_SNAKE_CASE_ ) )
A: int = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def _snake_case ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''' )
def _snake_case ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
def _snake_case ( self : str ) -> Tuple:
'''simple docstring'''
A: List[Any] = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
A: Union[str, Any] = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
A: Union[str, Any] = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(output['''text'''] , SCREAMING_SNAKE_CASE_ )
| 319 | 1 |
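A minimal usage sketch of the phoneme tokenizer behavior these tests pin down (requires the phonemizer backend and Hub access; the checkpoint is the one used in the tests):

from transformers import Wav2Vec2PhonemeCTCTokenizer

tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
# Text is phonemized before tokenization, so encode/decode round-trips phonemes, not words
phonemes = tokenizer.phonemize("Hello how are you", phonemizer_lang="en-us")
print(phonemes)  # h ə l oʊ h aʊ ɑːɹ j uː
ids = tokenizer(phonemes, do_phonemize=False).input_ids
print(tokenizer.decode(ids))  # round-trips to the same phoneme string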
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
_DESCRIPTION = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
_KWARGS_DESCRIPTION = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
"""simple docstring"""
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 367 |
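A usage sketch mirroring the metric's own docstring examples (note that datasets.load_metric is deprecated in recent releases in favor of the separate evaluate package):

import datasets

glue_metric = datasets.load_metric("glue", "mrpc")
results = glue_metric.compute(predictions=[0, 1], references=[0, 1])
print(results)  # {'accuracy': 1.0, 'f1': 1.0}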
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True, ):
        size = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w)
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h)
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """image_mean"""))
        self.assertTrue(hasattr(image_processing, """image_std"""))
        self.assertTrue(hasattr(image_processing, """do_normalize"""))
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """size"""))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 18, """longest_edge""": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {"""shortest_edge""": 42, """longest_edge""": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
        with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""", """r""") as f:
            target = json.loads(f.read())
        target = {"""image_id""": 39769, """annotations""": target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""")
        encoding = image_processing(images=image, annotations=target, return_tensors="""pt""")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["""pixel_values"""].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape, expected_shape)
        expected_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
        with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""", """r""") as f:
            target = json.loads(f.read())
        target = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
        masks_path = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""")
        # encode them
        image_processing = ConditionalDetrImageProcessor(format="""coco_panoptic""")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="""pt""")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["""pixel_values"""].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""], expected_area))
        # verify boxes
        expected_shape = torch.Size([6, 4])
        self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape, expected_shape)
        expected_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0], expected_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""], expected_size))
| 183 | 0 |
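A hedged inference sketch for the image processor under test (the checkpoint comes from the tests; the URL is the standard COCO sample image used across the transformers docs):

import requests
from PIL import Image
from transformers import ConditionalDetrImageProcessor

image_processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = image_processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 800, 1066]) for this image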
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1_000, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """bbox""": bbox,
            """token_type_ids""": token_type_ids,
            """attention_mask""": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device, )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 231 |
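A minimal inference sketch matching the integration test above (same checkpoint; requires torch and Hub access):

import torch
from transformers import LiltModel

model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
input_ids = torch.tensor([[1, 2]])
bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])  # one (x0, y0, x1, y1) box per token
with torch.no_grad():
    outputs = model(input_ids=input_ids, bbox=bbox)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 2, 768])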
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text
    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )
    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
                    F'''with: "{text}".''')
                self.new_user_input = text
            else:
                logger.warning(
                    F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
                    F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''')
        else:
            self.new_user_input = text
    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None
    def append_response(self, response: str):
        self.generated_responses.append(response)
    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input
    def __repr__(self):
        output = F'''Conversation id: {self.uuid} \n'''
        for is_user, text in self.iter_texts():
            name = """user""" if is_user else """bot"""
            output += F'''{name} >> {text} \n'''
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS, r"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ", )
class ConversationalPipeline(Pipeline):
"""simple docstring"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["""min_length_for_response"""] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["""minimum_tokens"""] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["""max_length"""] = generate_kwargs["""max_length"""]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["""clean_up_tokenization_spaces"""] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("""ConversationalPipeline, expects Conversation as inputs""")
        if conversation.new_user_input is None:
            raise ValueError(
                F'''Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. '''
                """Add user inputs with the conversation's `add_user_input` method""")
        if hasattr(self.tokenizer, """_build_conversation_input_ids"""):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("""max_length""", self.model.config.max_length)
        n = model_inputs["""input_ids"""].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''')
            trim = max_length - minimum_tokens
            model_inputs["""input_ids"""] = model_inputs["""input_ids"""][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["""attention_mask"""] = model_inputs["""attention_mask"""][:, -trim:]
        conversation = model_inputs.pop("""conversation""")
        model_inputs["""max_length"""] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["""output_ids"""]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
        conversation = model_outputs["""conversation"""]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation
    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 170 | 0 |
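A hedged usage sketch for the pipeline above; the DialoGPT checkpoint is an assumption, any conversational model on the Hub works:

from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")  # hypothetical model choice
conversation = Conversation("Hi, can you recommend a movie?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])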
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"""{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"""
        return "".join(matched)
    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)
    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
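
    # Hand-checked example: for "martha"/"marhta" there are 6 matches and one
    # transposition, so jaro = (6/6 + 6/6 + 5/6) / 3 ~= 0.9444; the common
    # prefix "mar" lifts the score by 0.1 * 3 * (1 - jaro) to ~= 0.9611.
    assert round(jaro_winkler("martha", "marhta"), 4) == 0.9611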
| 91 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowerCamelCase__ ( A : str="" ):
'''simple docstring'''
UpperCAmelCase = tempfile.mkdtemp()
return os.path.join(A , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
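
# The round trip these tests exercise, as a standalone sketch (assuming the
# transformers agent-types API shown in the imports above):
if is_torch_available() and is_vision_available():
    demo_tensor = torch.randint(0, 256, (64, 64, 3))
    demo_image = AgentImage(demo_tensor)
    demo_path = demo_image.to_string()  # serialized to a temporary PNG on disk
    demo_raw = demo_image.to_raw()      # recovered as a PIL.Image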
| 91 | 1 |
def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than four characters in the sentence."""
    return " ".join(word[::-1] if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
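
    # Words longer than four characters come back reversed; shorter ones
    # pass through unchanged.
    assert reverse_long_words("Hey wollef sroirraw") == "Hey fellow warriors"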
| 299 |
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__UpperCAmelCase = logging.get_logger(__name__)
enable_full_determinism()
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =UNetaDModel
UpperCAmelCase_ ="sample"
@property
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCamelCase ( self ) -> List[str]:
return (3, 32, 32)
@property
def _UpperCamelCase ( self ) -> Tuple:
return (3, 32, 32)
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = {
'''block_out_channels''': (32, 64),
'''down_block_types''': ('''DownBlock2D''', '''AttnDownBlock2D'''),
'''up_block_types''': ('''AttnUpBlock2D''', '''UpBlock2D'''),
'''attention_head_dim''': 3,
'''out_channels''': 3,
'''in_channels''': 3,
'''layers_per_block''': 2,
'''sample_size''': 32,
}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =UNetaDModel
UpperCAmelCase_ ="sample"
@property
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor([10] ).to(_A )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCamelCase ( self ) -> Tuple:
return (4, 32, 32)
@property
def _UpperCamelCase ( self ) -> Tuple:
return (4, 32, 32)
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = {
'''sample_size''': 32,
'''in_channels''': 4,
'''out_channels''': 4,
'''layers_per_block''': 2,
'''block_out_channels''': (32, 64),
'''attention_head_dim''': 32,
'''down_block_types''': ('''DownBlock2D''', '''DownBlock2D'''),
'''up_block_types''': ('''UpBlock2D''', '''UpBlock2D'''),
}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
SCREAMING_SNAKE_CASE_ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
model.to(_A )
SCREAMING_SNAKE_CASE_ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def _UpperCamelCase ( self ) -> Dict:
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=_A )
model_accelerate.to(_A )
model_accelerate.eval()
SCREAMING_SNAKE_CASE_ = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
SCREAMING_SNAKE_CASE_ = noise.to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor([10] * noise.shape[0] ).to(_A )
SCREAMING_SNAKE_CASE_ = model_accelerate(_A , _A )['''sample''']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=_A , low_cpu_mem_usage=_A )
model_normal_load.to(_A )
model_normal_load.eval()
SCREAMING_SNAKE_CASE_ = model_normal_load(_A , _A )['''sample''']
assert torch_all_close(_A , _A , rtol=1E-3 )
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(_A )
SCREAMING_SNAKE_CASE_ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
SCREAMING_SNAKE_CASE_ = noise.to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor([10] * noise.shape[0] ).to(_A )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
SCREAMING_SNAKE_CASE_ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1E-3 ) )
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ =UNetaDModel
UpperCAmelCase_ ="sample"
@property
def _UpperCamelCase ( self , _A=(32, 32) ) -> int:
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=_A )
return {"sample": noise, "timestep": time_step}
@property
def _UpperCamelCase ( self ) -> List[str]:
return (3, 32, 32)
@property
def _UpperCamelCase ( self ) -> List[Any]:
return (3, 32, 32)
def _UpperCamelCase ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = {
'''block_out_channels''': [32, 64, 64, 64],
'''in_channels''': 3,
'''layers_per_block''': 1,
'''out_channels''': 3,
'''time_embedding_type''': '''fourier''',
'''norm_eps''': 1E-6,
'''mid_block_scale_factor''': math.sqrt(2.0 ),
'''norm_num_groups''': None,
'''down_block_types''': [
'''SkipDownBlock2D''',
'''AttnSkipDownBlock2D''',
'''SkipDownBlock2D''',
'''SkipDownBlock2D''',
],
'''up_block_types''': [
'''SkipUpBlock2D''',
'''SkipUpBlock2D''',
'''AttnSkipUpBlock2D''',
'''SkipUpBlock2D''',
],
}
SCREAMING_SNAKE_CASE_ = self.dummy_input
return init_dict, inputs_dict
@slow
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=_A )
self.assertIsNotNone(_A )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(_A )
SCREAMING_SNAKE_CASE_ = self.dummy_input
SCREAMING_SNAKE_CASE_ = floats_tensor((4, 3) + (256, 256) ).to(_A )
SCREAMING_SNAKE_CASE_ = noise
SCREAMING_SNAKE_CASE_ = model(**_A )
assert image is not None, "Make sure output is not None"
@slow
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(_A )
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = (256, 256)
SCREAMING_SNAKE_CASE_ = torch.ones((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [1E-4] ).to(_A )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
SCREAMING_SNAKE_CASE_ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -1_0980.7129, -2_0028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE_ = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(_A )
SCREAMING_SNAKE_CASE_ = 4
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = torch.ones((batch_size, num_channels) + sizes ).to(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor(batch_size * [1E-4] ).to(_A )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(_A , _A ).sample
SCREAMING_SNAKE_CASE_ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(_A , _A , rtol=1E-2 ) )
def _UpperCamelCase ( self ) -> Dict:
# not required for this model
pass
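
# A sketch of instantiating the small test configuration used above. One
# assumption for illustration: the mangled `UNetaDModel` in this dump
# corresponds to the public `diffusers.UNet2DModel`.
import torch
from diffusers import UNet2DModel

demo_model = UNet2DModel(
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    attention_head_dim=3,
    in_channels=3,
    out_channels=3,
    layers_per_block=2,
    sample_size=32,
)
demo_sample = torch.randn(4, 3, 32, 32)
demo_timestep = torch.tensor([10])
demo_output = demo_model(demo_sample, demo_timestep).sample  # same shape as `demo_sample`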
| 299 | 1 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx, dims) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
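
# For example, flat index 5 in a (2, 3) batch grid maps back to (1, 2):
# 5 % 3 == 2, then 5 // 3 == 1.
assert _flat_idx_to_idx(5, (2, 3)) == (1, 2)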
@torch.jit.ignore
def _get_minimal_slice_set(start, end, dims, start_edges=None, end_edges=None) -> List[Tuple[slice, ...]]:
    # start and end are inclusive
    def reduce_edge_list(l) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]], ) )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :], ) )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t, flat_start, flat_end, no_batch_dims) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx, end_idx, batch_dims, )

    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer, inputs, chunk_size, no_batch_dims, low_mem=False, _out=None, _add_into_out=False, ) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims), )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):
            def assign(d1, d2) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
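
# A toy driver for chunk_layer above (names as restored in this dump): apply a
# simple layer over a batch of 12 in chunks of 4, so peak memory for the
# intermediate activations is proportional to the chunk size.
def _toy_layer(x=None, y=None):
    return {"out": x + y}

_toy_inputs = {"x": torch.randn(12, 8), "y": torch.randn(12, 8)}
_toy_out = chunk_layer(_toy_layer, _toy_inputs, chunk_size=4, no_batch_dims=1)
assert torch.allclose(_toy_out["out"], _toy_inputs["x"] + _toy_inputs["y"])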
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512, ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None
    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size):
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches(self, ac1, ac2):
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent
    def tune_chunk_size(self, representative_fn, args, min_chunk_size, ):
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # Args have the same structure as the cached ones; re-tune only if shapes/values changed
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # First call: nothing is cached yet, so we have to tune
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size, )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
| 219 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
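
# Illustration of clean_doc_toc on a toy fragment (hypothetical doc entries):
# the duplicate "ddim" entries collapse into one, titles are sorted, and the
# overview entry is pinned first.
assert clean_doc_toc(
    [
        {"local": "overview", "title": "Overview"},
        {"local": "ddpm", "title": "DDPM"},
        {"local": "ddim", "title": "DDIM"},
        {"local": "ddim", "title": "DDIM"},
    ]
) == [
    {"local": "overview", "title": "Overview"},
    {"local": "ddim", "title": "DDIM"},
    {"local": "ddpm", "title": "DDPM"},
]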
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 219 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 196 |
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Base case: a list of length <= 1 is already sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Inserts collection[index - 1] into its sorted position to the right
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
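
    # Deterministic check of the recursion, independent of user input:
    sample = [5, 3, 1, 4, 2]
    rec_insertion_sort(sample, len(sample))
    assert sample == [1, 2, 3, 4, 5]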
| 295 | 0 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid), self._query_range(node.right, node.mid + 1, j), )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 358 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
] , )
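
# The pipeline exercised above, driven directly; running this needs network
# access to download the SAM checkpoint, so it is guarded.
if __name__ == "__main__":
    generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
    demo_outputs = generator(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        points_per_batch=256,
    )
    print(len(demo_outputs["masks"]), demo_outputs["scores"][:3])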
| 320 | 0 |
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
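
# The expectation/maximization loop above, stripped of the TF1 session
# plumbing: a self-contained NumPy sketch of the same two steps
# (illustrative only, not the routine above).
import numpy as np

def _numpy_k_means(vectors, k, iterations=100, seed=0):
    rng = np.random.default_rng(seed)
    x = np.asarray(vectors, dtype=float)
    centroids = x[rng.choice(len(x), size=k, replace=False)]
    for _ in range(iterations):
        # Expectation: assign each vector to its nearest centroid.
        dists = np.linalg.norm(x[:, None, :] - centroids[None, :, :], axis=-1)
        labels = dists.argmin(axis=1)
        # Maximization: move each centroid to the mean of its cluster.
        for c in range(k):
            if (labels == c).any():
                centroids[c] = x[labels == c].mean(axis=0)
    return centroids, labels

_, _labels = _numpy_k_means([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [9.0, 9.5]], k=2)
print(_labels)  # the two small points share one cluster, the two large the other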
| 280 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search; returns the set of explored vertices."""
    explored, stack = set(start), [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
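
    # Every vertex in G is reachable from "A", so the traversal returns all of them:
    assert depth_first_search(G, "A") == {"A", "B", "C", "D", "E", "F", "G"}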
| 329 | 0 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # Dummy iterable dataset: yields increasing integers and stops with
    # probability p_stop after each item, up to max_length items.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class BatchSamplerShardsTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # Check equivalent lengths first, for nicer error messages on failure
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
"""simple docstring"""
# Check the shards when the dataset is a round multiple of total batch size.
UpperCamelCase__ : Any = BatchSampler(range(24 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__ )
UpperCamelCase__ : List[str] = BatchSampler(range(24 ), batch_size=3, drop_last=__magic_name__ )
# Expected shouldn't change
self.check_batch_sampler_shards(__magic_name__, __magic_name__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
UpperCamelCase__ : Optional[int] = BatchSampler(range(21 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__ )
UpperCamelCase__ : List[str] = BatchSampler(range(21 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
UpperCamelCase__ : int = BatchSampler(range(22 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__ )
UpperCamelCase__ : Any = BatchSampler(range(22 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
UpperCamelCase__ : Union[str, Any] = BatchSampler(range(20 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__ )
UpperCamelCase__ : str = BatchSampler(range(20 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__ )
# Check the shards when the dataset is very small.
UpperCamelCase__ : Optional[int] = BatchSampler(range(2 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : int = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__magic_name__, __magic_name__ )
UpperCamelCase__ : Optional[int] = BatchSampler(range(2 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : List[Any] = [[], []]
self.check_batch_sampler_shards(__magic_name__, __magic_name__ )
    def test_batch_sampler_shards_with_splits(self):
"""simple docstring"""
# Check the shards when the dataset is a round multiple of batch size.
UpperCamelCase__ : List[str] = BatchSampler(range(24 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__ )
UpperCamelCase__ : List[Any] = BatchSampler(range(24 ), batch_size=4, drop_last=__magic_name__ )
# Expected shouldn't change
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__ )
# Check the shards when the dataset is not a round multiple of batch size.
UpperCamelCase__ : Tuple = BatchSampler(range(22 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__ )
UpperCamelCase__ : Dict = BatchSampler(range(22 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
UpperCamelCase__ : Dict = BatchSampler(range(21 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__ )
UpperCamelCase__ : Tuple = BatchSampler(range(21 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__ )
# Check the shards when the dataset is very small.
UpperCamelCase__ : List[str] = BatchSampler(range(2 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : str = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__ )
UpperCamelCase__ : Optional[Any] = BatchSampler(range(2 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : Tuple = [[], []]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__ )
    def test_batch_sampler_shards_with_no_splits_no_even(self):
"""simple docstring"""
# Check the shards when the dataset is a round multiple of total batch size.
UpperCamelCase__ : List[Any] = BatchSampler(range(24 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, even_batches=__magic_name__ )
UpperCamelCase__ : Optional[int] = BatchSampler(range(24 ), batch_size=3, drop_last=__magic_name__ )
# Expected shouldn't change
self.check_batch_sampler_shards(__magic_name__, __magic_name__, even_batches=__magic_name__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
UpperCamelCase__ : Tuple = BatchSampler(range(21 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, even_batches=__magic_name__ )
UpperCamelCase__ : Optional[Any] = BatchSampler(range(21 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, even_batches=__magic_name__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
UpperCamelCase__ : Union[str, Any] = BatchSampler(range(22 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, even_batches=__magic_name__ )
UpperCamelCase__ : int = BatchSampler(range(22 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, even_batches=__magic_name__ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
UpperCamelCase__ : List[Any] = BatchSampler(range(20 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, even_batches=__magic_name__ )
UpperCamelCase__ : Optional[int] = BatchSampler(range(20 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, even_batches=__magic_name__ )
# Check the shards when the dataset is very small.
UpperCamelCase__ : List[Any] = BatchSampler(range(2 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Dict = [[[0, 1]], []]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, even_batches=__magic_name__ )
UpperCamelCase__ : Optional[Any] = BatchSampler(range(2 ), batch_size=3, drop_last=__magic_name__ )
UpperCamelCase__ : Tuple = [[], []]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, even_batches=__magic_name__ )
    def test_batch_sampler_shards_with_splits_no_even(self):
"""simple docstring"""
# Check the shards when the dataset is a round multiple of batch size.
UpperCamelCase__ : int = BatchSampler(range(24 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__, even_batches=__magic_name__ )
UpperCamelCase__ : Dict = BatchSampler(range(24 ), batch_size=4, drop_last=__magic_name__ )
# Expected shouldn't change
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__, even_batches=__magic_name__ )
# Check the shards when the dataset is not a round multiple of batch size.
UpperCamelCase__ : List[Any] = BatchSampler(range(22 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__, even_batches=__magic_name__ )
UpperCamelCase__ : Optional[int] = BatchSampler(range(22 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__, even_batches=__magic_name__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
UpperCamelCase__ : Any = BatchSampler(range(21 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__, even_batches=__magic_name__ )
UpperCamelCase__ : Tuple = BatchSampler(range(21 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__, even_batches=__magic_name__ )
# Check the shards when the dataset is very small.
UpperCamelCase__ : str = BatchSampler(range(2 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : Optional[Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__, even_batches=__magic_name__ )
UpperCamelCase__ : Dict = BatchSampler(range(2 ), batch_size=4, drop_last=__magic_name__ )
UpperCamelCase__ : int = [[], []]
self.check_batch_sampler_shards(__magic_name__, __magic_name__, split_batches=__magic_name__, even_batches=__magic_name__ )
    def test_batch_sampler_with_varying_batch_size(self):
        """simple docstring"""
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        """simple docstring"""
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i, split_batches=split_batches, )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        """simple docstring"""
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        """simple docstring"""
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        """simple docstring"""
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        """simple docstring"""
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        """simple docstring"""
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        """simple docstring"""
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
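# A quick self-contained sketch of the sharding contract exercised above,
# using only names this module already imports. It assumes the default
# even_batches=True (irrelevant here, since 4 batches split evenly over
# 2 processes): each process sees every other batch of the wrapped sampler.
_demo_sampler = BatchSampler(range(8), batch_size=2, drop_last=False)
_demo_shards = [BatchSamplerShard(_demo_sampler, 2, i) for i in range(2)]
assert list(_demo_shards[0]) == [[0, 1], [4, 5]]
assert list(_demo_shards[1]) == [[2, 3], [6, 7]]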
| 247 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta'] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta_fast'] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta'] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlm_roberta'] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xlm_roberta'] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
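# What the lazy structure above buys, in short: importing this package is
# cheap, because `_LazyModule` is placed in `sys.modules` as a stand-in.
# Accessing e.g. `XLMRobertaModel` on the package for the first time is what
# triggers the real (heavy, torch-dependent) submodule import.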
| 247 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its position in the alphabet (a=1 ... z=26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of `encode`: map 1-26 back to lowercase letters."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input('''-> ''').strip().lower())
    print('''Encoded: ''', encoded)
    print('''Decoded:''', decode(encoded))
if __name__ == "__main__":
main()
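# Round-trip example for the helpers above:
#     encode("hello") -> [8, 5, 12, 12, 15]
#     decode([8, 5, 12, 12, 15]) -> "hello"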
| 304 | 0 |
def solution() -> str:
    """Return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
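# An equivalent sketch using three-argument pow(), so no thousand-digit
# integers are ever materialized; reducing modulo 10**10 keeps exactly the
# last ten digits (zfill restores any leading zeros the modulus would drop).
def solution_mod() -> str:
    mod = 10**10
    return str(sum(pow(i, i, mod) for i in range(1, 1001)) % mod).zfill(10)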
| 265 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_A : List[Any] = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model , tokenizer=tokenizer , candidate_labels=['''politics''', '''health'''] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, examples):
        outputs = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
        self.assertEqual(outputs , {'''sequence''': ANY(str), '''labels''': [ANY(str)], '''scores''': [ANY(float)]} )

        # No kwarg
        outputs = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
        self.assertEqual(outputs , {'''sequence''': ANY(str), '''labels''': [ANY(str)], '''scores''': [ANY(float)]} )

        outputs = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
        self.assertEqual(outputs , {'''sequence''': ANY(str), '''labels''': [ANY(str)], '''scores''': [ANY(float)]} )

        outputs = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
        self.assertEqual(
            outputs , {'''sequence''': ANY(str), '''labels''': [ANY(str), ANY(str)], '''scores''': [ANY(float), ANY(float)]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )

        outputs = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
        self.assertEqual(
            outputs , {'''sequence''': ANY(str), '''labels''': [ANY(str), ANY(str)], '''scores''': [ANY(float), ANY(float)]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )

        outputs = classifier(
            '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
        self.assertEqual(outputs , {'''sequence''': ANY(str), '''labels''': [ANY(str)], '''scores''': [ANY(float)]} )

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str), '''labels''': [ANY(str), ANY(str)], '''scores''': [ANY(float), ANY(float)]}
                for i in range(1 )
            ] , )
        outputs = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str), '''labels''': [ANY(str), ANY(str)], '''scores''': [ANY(float), ANY(float)]}
                for i in range(2 )
            ] , )

        with self.assertRaises(ValueError ):
            classifier('''''' , candidate_labels='''politics''' )

        with self.assertRaises(TypeError ):
            classifier(None , candidate_labels='''politics''' )

        with self.assertRaises(ValueError ):
            classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )

        with self.assertRaises(TypeError ):
            classifier('''Who are you voting for in 2020?''' , candidate_labels=None )

        with self.assertRaises(ValueError ):
            classifier(
                '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )

        with self.assertRaises(AttributeError ):
            classifier(
                '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=None , )

        self.run_entailment_id(classifier )
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )

        config.label2id = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )

        config.label2id = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )

        config.label2id = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )

        config.label2id = original_label2id
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
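    # In short, what `run_entailment_id` pins down: the pipeline's
    # `entailment_id` property scans `config.label2id` for a label whose
    # lowercased name starts with "entail" and returns its id, falling back
    # to -1 when no such label exists.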
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 1_0_0 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
        outputs = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )

        self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.3_33, 0.3_33, 0.3_33],
} , )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            '''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
        outputs = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.3_33, 0.3_33, 0.3_33],
} , )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
        outputs = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.9_76, 0.0_15, 0.0_09],
} , )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
        outputs = zero_shot_classifier(
            '''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
        self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.9_76, 0.0_15, 0.0_09],
} , )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
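# Minimal direct usage of the pipeline under test, as a sketch only: it
# reuses the tiny checkpoint exercised by the fast tests above and needs
# network access to download it on first use.
demo_classifier = pipeline('''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' )
print(demo_classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''science'''] ) )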
| 265 | 1 |
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
"""simple docstring"""
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
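# Worked example for the helper above: with L = 10 mH and C = 100 nF,
#     f = 1 / (2 * pi * sqrt(L * C)) ~= 5032.9 Hz
# i.e. resonant_frequency(10e-3, 100e-9) returns
# ("Resonant frequency", 5032.92...).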
| 18 |
'''simple docstring'''
from itertools import count
def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
fill_count_functions.append(1 )
for block_length in range(_lowercase , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_0_0_0_0_0_0:
break
return n
if __name__ == "__main__":
print(F'''{solution() = }''')
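# Sanity check against the Project Euler 114 statement: with minimum block
# length 3, a row of length 7 admits exactly 17 fillings. This replays the
# recurrence used in `solution` above on a small table.
fills = [1, 1, 1]
for n in range(3, 8):
    fills.append(1)
    for block_length in range(3, n + 1):
        for block_start in range(n - block_length):
            fills[n] += fills[n - block_start - block_length - 1]
        fills[n] += 1
assert fills[7] == 17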
| 265 | 0 |
class _lowercase:
    """Disjoint-set (union-find) tracking per-set element counts and the largest set size."""

    def __init__(self, set_counts) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src, dst) -> bool:
        """Merge the sets containing `src` and `dst`; return False if already merged."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set):
        """Find the representative of `disj_set`, compressing the path on the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
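# Usage sketch for the class above: three singleton sets, two merges, and
# the largest component grows to 3.
disjoint_set = _lowercase([1, 1, 1])
disjoint_set.merge(1, 2)
disjoint_set.merge(0, 2)
assert disjoint_set.max_set == 3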
| 71 |
import os
def solution() -> int:
    """Largest product of four adjacent numbers, in any direction, in the 20x20 grid."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
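# An equivalent sketch that walks four direction vectors instead of writing
# one loop nest per direction; `grid` is assumed to be the same 20x20 list
# of ints that `solution` loads above.
def max_product(grid):
    best = 0
    for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):
        for i in range(20):
            for j in range(20):
                if 0 <= i + 3 * di < 20 and 0 <= j + 3 * dj < 20:
                    p = grid[i][j] * grid[i + di][j + dj] * grid[i + 2 * di][j + 2 * dj] * grid[i + 3 * di][j + 3 * dj]
                    best = max(best, p)
    return best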
| 71 | 1 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    '''simple docstring'''
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''')

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
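# Shape of the schedule produced above, for orientation: with the default
# cosine transform, betas start near zero and the final step is capped at
# max_beta = 0.999, because alpha_bar(1) = cos(pi/2)**2 = 0 makes the raw
# ratio collapse to 1.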
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ) -> None:
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' )

        self.betas = betas_for_alpha_bar(num_train_timesteps )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        self.one = torch.tensor(1.0 )

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps )[::-1].copy() )

        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """simple docstring"""
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """simple docstring"""
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        """simple docstring"""
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance , min=1e-2_0 ) )
            variance = torch.exp(0.5 * variance )
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        """simple docstring"""
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
                ' for the UnCLIPScheduler.' )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample , -self.config.clip_sample_range , self.config.clip_sample_range )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape , dtype=model_output.dtype , generator=generator , device=model_output.device )

            variance = self._get_variance(
                t , predicted_variance=predicted_variance , prev_timestep=prev_timestep , )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
                    ' for the UnCLIPScheduler.' )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        """simple docstring"""
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
        timesteps = timesteps.to(original_samples.device )

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1 )

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1 )

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
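    # The forward-noising rule implemented by `add_noise` above, written out:
    #     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,  eps ~ N(0, I)
    # i.e. each timestep linearly mixes the clean sample with unit Gaussian noise.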
| 329 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ :Any = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    '''Rename a state-dict key while shifting its block index down by `offset`.'''
    to_find = original_name.split('.' )[0]
    key_list = key.split('.' )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset

    key = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''' )
    return key
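# Worked example for the helper above (the key is hypothetical, offset 1):
#   replace_key_with_offset("2.3.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
#   -> "block.1.3.output.conv1.weight"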
def rename_keys(state_dict):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('network' ):
            key = key.replace('network' , 'poolformer.encoder' )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('bias' ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('proj' )]
            key = key.replace(to_replace , F'''patch_embeddings.{total_embed_found}.''' )
            key = key.replace('proj' , 'projection' )
            if key.endswith('bias' ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = 'poolformer.encoder.' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , 'mlp.fc1' , 'output.conv1' )
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , 'mlp.fc2' , 'output.conv2' )
        if "norm1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , 'norm1' , 'before_norm' )
        if "norm2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , 'norm2' , 'after_norm' )
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , 'layer_scale_1' , 'layer_scale_1' )
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , 'layer_scale_2' , 'layer_scale_2' )
        if "head" in key:
            key = key.replace('head' , 'classifier' )
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    '''simple docstring'''
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    size = model_name[-3:]
    config.num_labels = 1_0_0_0
    filename = 'imagenet-1k-id2label.json'
    expected_shape = (1, 1_0_0_0)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [6_4, 1_2_8, 3_2_0, 5_1_2]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 1_2, 4]
        config.hidden_sizes = [6_4, 1_2_8, 3_2_0, 5_1_2]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 1_8, 6]
        config.hidden_sizes = [6_4, 1_2_8, 3_2_0, 5_1_2]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 1_8, 6]
        config.hidden_sizes = [9_6, 1_9_2, 3_8_4, 7_6_8]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 2_4, 8]
        config.hidden_sizes = [9_6, 1_9_2, 3_8_4, 7_6_8]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(F'''Size {size} not supported''' )

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors='pt' ).pixel_values

    logger.info(F'''Converting model {model_name}...''' )

    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device('cpu' ) )

    # rename keys
    state_dict = rename_keys(state_dict )

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    pixel_values = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values

    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
    elif size == "s24":
        expected_slice = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
    elif size == "s36":
        expected_slice = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
    elif size == "m36":
        expected_slice = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
    elif size == "m48":
        expected_slice = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
    else:
        raise ValueError(F'''Size {size} not supported''' )

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , expected_slice , atol=1e-2 )

    # finally, save model and image processor
    logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCAmelCase__ :str = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCAmelCase__ :Dict = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 329 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset )

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item , **self.params )
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader )

    def __iter__(self):
        self.iterator = iter(self.loader )
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element , ModelOutput ):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element , tuple ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched )
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator )
        processed = self.infer(item , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed , torch.Tensor ):
                first_tensor = processed
            else:
                key = list(processed.keys() )[0]
                first_tensor = processed[key]
            if isinstance(first_tensor , list ):
                observed_batch_size = len(first_tensor )
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader , infer , params )

    def __iter__(self):
        self.iterator = iter(self.loader )
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
        try:
            # Try to return next item
            processed = next(self.subiterator )
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator ) , **self.params )
            processed = next(self.subiterator )
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader )
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.

        # This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last" )
                accumulator.append(item )
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(processed , torch.Tensor ):
                    first_tensor = processed
                else:
                    key = list(processed.keys() )[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor , list ):
                    observed_batch_size = len(first_tensor )
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last" )
                    accumulator.append(item )
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last" )
                accumulator.append(item )
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset )

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset )

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 123 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )

        inputs = [input_ids, input_mask]
        result = model(inputs )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForMaskedLM(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFDistilBertModel,
"""fill-mask""": TFDistilBertForMaskedLM,
"""question-answering""": TFDistilBertForQuestionAnswering,
"""text-classification""": TFDistilBertForSequenceClassification,
"""token-classification""": TFDistilBertForTokenClassification,
"""zero-shot""": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            model = TFDistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
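    # The check above is the usual pinned-checkpoint regression pattern: run a fixed
    # input through a released model and compare a small corner of the output against
    # hard-coded values within a tolerance. In sketch form (hypothetical tensors):
    #
    #   actual = model(tf.constant([[0, 1, 2]]))[0]
    #   tf.debugging.assert_near(actual[:, :2, :2], expected_corner, atol=1e-4)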
| 123 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape, (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape, (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape, (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
# Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
), )
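# A minimal usage sketch outside the test harness (the checkpoint name below is an
# assumption and is not used by the tests above):
#
#   processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
#   video = [np.random.randint(0, 255, (360, 640, 3), dtype=np.uint8) for _ in range(32)]
#   pixel_values = processor(video, return_tensors="pt").pixel_values
#   # -> (1, num_frames, num_channels, crop_height, crop_width)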
| 201 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
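# The `from_generator` pattern above pairs each yielded (features_dict, label)
# tuple with matching dtype and shape structures. A toy, self-contained sketch
# (hypothetical token ids; kept as comments so the script's behavior is unchanged):
#
#   def toy_gen():
#       yield ({"input_ids": [101, 2054, 102]}, 1)
#
#   toy_ds = tf.data.Dataset.from_generator(
#       toy_gen,
#       ({"input_ids": tf.int32}, tf.int64),
#       ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
#   )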
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results
if __name__ == "__main__":
main()
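# An assumed example invocation (flag names come from the dataclasses above plus
# TFTrainingArguments; file names and the script name are placeholders):
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv --label_column_id 0 \
#       --output_dir ./model_out --do_train --do_eval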
| 201 | 1 |
from __future__ import annotations

from collections import deque


class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slices of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
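# A minimal extra usage sketch (hypothetical processes, independent of the demo
# below): a process that fits in the first time slice finishes during the
# round-robin pass, while a longer one drains in the final FCFS queue.
#
# >>> demo = MLFQ(2, [6], deque([Process("A", 0, 5), Process("B", 0, 12)]), 0)
# >>> _ = demo.multi_level_feedback_queue()
# >>> demo.calculate_sequence_of_finish_queue()
# ['A', 'B']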
if __name__ == "__main__":
import doctest
lowerCamelCase_ : Any = Process("""P1""", 0, 53)
lowerCamelCase_ : Tuple = Process("""P2""", 0, 17)
lowerCamelCase_ : Any = Process("""P3""", 0, 68)
lowerCamelCase_ : Optional[Any] = Process("""P4""", 0, 24)
lowerCamelCase_ : List[Any] = 3
lowerCamelCase_ : int = [17, 25]
lowerCamelCase_ : List[Any] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})
lowerCamelCase_ : List[str] = Process("""P1""", 0, 53)
lowerCamelCase_ : int = Process("""P2""", 0, 17)
lowerCamelCase_ : Optional[Any] = Process("""P3""", 0, 68)
lowerCamelCase_ : Optional[Any] = Process("""P4""", 0, 24)
lowerCamelCase_ : str = 3
lowerCamelCase_ : List[Any] = [17, 25]
lowerCamelCase_ : Optional[int] = deque([Pa, Pa, Pa, Pa])
lowerCamelCase_ : Any = MLFQ(number_of_queues, time_slices, queue, 0)
lowerCamelCase_ : int = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
F'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
| 197 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self, vocab_size=32000, hidden_size=768, num_hidden_layers=12, intermediate_size=3072,
        hidden_act="gelu_new", hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=4,
        initializer_range=0.02, layer_norm_eps=1e-12, use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512, pad_token_id=3, bos_token_id=1, eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
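# A minimal usage sketch (hypothetical values; the defaults above already match
# the released FNet checkpoints):
#
#   config = FNetConfig(num_hidden_layers=6)
#   config.save_pretrained("./fnet-small")  # writes config.json with model_type "fnet"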
| 197 | 1 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Helper function to read an audio file through ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def A_ ( A__ , A__ , A__ = "f32le" , ) -> int:
a__ : Optional[int] = F'{sampling_rate}'
a__ : Optional[Any] = '1'
if format_for_conversion == "s16le":
a__ : List[str] = 2
elif format_for_conversion == "f32le":
a__ : int = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
a__ : List[Any] = platform.system()
if system == "Linux":
a__ : Any = 'alsa'
a__ : Optional[Any] = 'default'
elif system == "Darwin":
a__ : Union[str, Any] = 'avfoundation'
a__ : Any = ':0'
elif system == "Windows":
a__ : Union[str, Any] = 'dshow'
a__ : Optional[int] = 'default'
a__ : Dict = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
a__ : List[Any] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
a__ : str = _ffmpeg_stream(A__ , A__ )
for item in iterator:
yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from an iterator and yields chunks of length `chunk_len`,
    overlapped by `stride`; `stream` returns partial results before a full chunk
    is available.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """
    Internal function to create the generator of data through ffmpeg.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
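# A quick, self-contained illustration of the chunking contract of
# `chunk_bytes_iter` on a toy byte stream (not audio):
if __name__ == "__main__":
    demo_chunks = list(chunk_bytes_iter(iter([b"abcdefgh"]), 4, stride=(1, 1)))
    # 4-byte windows that overlap by stride_left + stride_right bytes, plus a tail:
    print([c["raw"] for c in demo_chunks])  # [b'abcd', b'cdef', b'efgh', b'gh']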
| 99 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 314 | 0 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'compression_format, is_archive' , [
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] , )
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, output_path, extractor_format)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have fewer false positives than zipfile.is_zipfile
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
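# A minimal sketch of the public API exercised above (hypothetical paths):
#
#   fmt = Extractor.infer_extractor_format("archive.tar.gz")
#   if fmt is not None:
#       Extractor.extract("archive.tar.gz", "out_dir", fmt)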
| 364 |
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
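# An odd cycle can never be 2-colored; the triangle is the smallest counterexample:
print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False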
| 348 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """
    Calculate inductive reactance, frequency, or inductance from the other two
    values (pass exactly one argument as 0).
    """
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
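# Worked example: a 10 mH inductor driven at 1 kHz has reactance
# X_L = 2 * pi * f * L = 2 * pi * 1000 * 0.01 ≈ 62.832 ohms, so:
#
#   ind_reactance(0.01, 1000, 0)  # -> {'reactance': 62.83185307179586}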
if __name__ == "__main__":
import doctest
doctest.testmod() | 286 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    r"""
    Constructs a ViLT processor which wraps a BERT tokenizer and a ViLT image
    processor into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None,
        stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None,
        return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False,
        return_length=False, verbose=True, return_tensors=None, **kwargs,
    ):
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 286 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
A : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
A : List[Any] = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/electra-small-generator": 5_1_2,
"google/electra-base-generator": 5_1_2,
"google/electra-large-generator": 5_1_2,
"google/electra-small-discriminator": 5_1_2,
"google/electra-base-discriminator": 5_1_2,
"google/electra-large-discriminator": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]",
        pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
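# A minimal usage sketch:
#
#   tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   enc = tokenizer("hello world")
#   enc["token_type_ids"]  # all zeros for a single segment, per the method above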
| 259 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 259 | 1 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masked_images, processed_masks, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 103 |
'''simple docstring'''
from __future__ import annotations

import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 93 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        ) | 352 |
"""A simple feed-forward neural network with two hidden layers, trained with
manual backpropagation on 3-bit parity data."""
import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array

        # Random initial weights between the input layer
        # (input_array.shape[1] nodes) and the first hidden layer (4 nodes).
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial weights between the first hidden layer (4 nodes)
        # and the second hidden layer (3 nodes).
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial weights between the second hidden layer (3 nodes)
        # and the output layer (1 node).
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values of the neural network; initially all zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # Layer connecting the first hidden set of nodes with the second.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # Layer connecting the second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Apply the sigmoid activation function element-wise."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of sigmoid, expressed in terms of the sigmoid output."""
    return (value) * (1 - (value))


def example() -> int:
    """Train the network on 3-bit parity data and predict one sample."""
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    test_output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=test_output
    )

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=test_output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example() | 187 | 0 |
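# --- Editor's illustration (not part of the original file): a sketch of a longer
# training run with the class above. Ten iterations (as in example()) rarely
# converge, so a larger count is used; the dtype matches example().
test_input = numpy.array([[0, 0, 0], [1, 1, 1]], dtype=numpy.float64)
test_output = numpy.array([[0], [1]], dtype=numpy.float64)
network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=test_output)
network.train(output=test_output, iterations=1500, give_loss=False)
print(network.predict(numpy.array([1, 1, 1], dtype=numpy.float64)))  # ideally 1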
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 148 |
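# --- Editor's illustration (not part of the original file): using the pipeline class
# above through the high-level factory. The checkpoint name is illustrative; any
# image-classification model on the Hub should work.
from transformers import pipeline

classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
predictions = classifier("path/to/image.jpg", top_k=3)
print(predictions)  # [{"score": ..., "label": ...}, ...] as built in postprocess()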
"""simple docstring"""
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time the process waits in the ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(self, number_of_queues: int, time_slices: list, queue: deque, current_time: int) -> None:
        self.number_of_queues = number_of_queues
        # time slices of the queues that the round-robin algorithm is applied to
        self.time_slices = time_slices
        # unfinished processes are in this ready queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished processes are in this sequence queue
        self.finish_queue: deque = deque()

    def calculate_sequence_of_finish_queue(self) -> list:
        """Return the names of the finished processes in completion order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list) -> list:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list) -> list:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list) -> list:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque) -> list:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque) -> deque:
        finished: deque = deque()  # sequence deque of finished processes
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if the process's arrival time is later than the current time, update the current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set its remaining burst time to 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to the finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished processes to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: deque, time_slice: int):
        finished: deque = deque()  # sequence deque of terminated processes
        # just for one cycle; unfinished processes will go back to the queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if the process's arrival time is later than the current time, update the current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of the process is bigger than the time slice
            if cp.burst_time > time_slice:
                # use the CPU for only the time slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update the end point time
                cp.stop_time = self.current_time
                # put the process at the back of the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use the CPU for the remaining burst time
                self.current_time += cp.burst_time
                # set the burst time to 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process's turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to the finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished processes to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque:
        # all queues except the last one use the round-robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue uses the first-come, first-served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes (P1, P2, P3, P4)
    print(
        f"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print completion times of processes (P1, P2, P3, P4)
    print(
        f"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(
        f"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print the sequence of finished processes
    print(
        f"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
    )
| 148 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 130 |
def cramers_rule_2x2(equation1: list, equation2: list) -> tuple:
    """Solve a system of two linear equations, each given as [a, b, c] for ax + by = c."""
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y) | 130 | 1 |
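# --- Editor's illustration (not part of the original file): a worked example.
# For 2x + 3y = 7 and 4x - y = 7: determinant = 2*(-1) - 4*3 = -14,
# determinant_x = 7*(-1) - 7*3 = -28 and determinant_y = 2*7 - 4*7 = -14,
# so x = -28 / -14 = 2 and y = -14 / -14 = 1.
assert cramers_rule_2x2([2, 3, 7], [4, -1, 7]) == (2.0, 1.0)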
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Create a random int32 tensor of the given shape with values below vocab_size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())


@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 219 |
"""Tests for the LayoutLM tokenizer."""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_like(self):
        pass
| 89 | 0 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    """Turns raw speech into padded log-mel spectrogram patches for TVLT."""

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,  # accepted for API compatibility; not used below
        mask_audio=False,  # accepted for API compatibility; not used below
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs | 369 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = "WhisperFeatureExtractor"
__lowerCAmelCase = "WhisperTokenizer"
def __init__( self , __A , __A ) -> Dict:
super().__init__(__A , __A )
a =self.feature_extractor
a =False
def SCREAMING_SNAKE_CASE ( self , __A=None , __A=None , __A=True ) -> int:
return self.tokenizer.get_decoder_prompt_ids(task=__A , language=__A , no_timestamps=__A )
def __call__( self , *__A , **__A ) -> Tuple:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A , **__A )
a =kwargs.pop('''audio''' , __A )
a =kwargs.pop('''sampling_rate''' , __A )
a =kwargs.pop('''text''' , __A )
if len(__A ) > 0:
a =args[0]
a =args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
a =self.feature_extractor(__A , *__A , sampling_rate=__A , **__A )
if text is not None:
a =self.tokenizer(__A , **__A )
if text is None:
return inputs
elif audio is None:
return encodings
else:
a =encodings['''input_ids''']
return inputs
def SCREAMING_SNAKE_CASE ( self , *__A , **__A ) -> Optional[Any]:
return self.tokenizer.batch_decode(*__A , **__A )
def SCREAMING_SNAKE_CASE ( self , *__A , **__A ) -> Union[str, Any]:
return self.tokenizer.decode(*__A , **__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A="np" ) -> Optional[Any]:
return self.tokenizer.get_prompt_ids(__A , return_tensors=__A ) | 215 | 0 |
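# --- Editor's illustration (not part of the original file): a typical round-trip
# with the processor above; the checkpoint name is illustrative.
import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
waveform = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz
inputs = processor(audio=waveform, sampling_rate=16000, text="hello world", return_tensors="pt")
print(inputs.keys())  # input features plus "labels", per __call__ above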
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 219 | from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    r"""
    Text-guided inpainting: a CLIPSeg segmentation model produces a mask from a text
    query, and a Stable Diffusion inpainting pipeline fills in the masked region.
    """

    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Segment the region described by `text` to build the inpainting mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
| 219 | 1 |
def max_digit_removal(number: int) -> int:
    """Return the largest value obtainable by deleting exactly one digit of `number`."""
    if not isinstance(number, int):
        raise TypeError("only integers accepted as input")
    num_string = str(abs(number))
    num_transpositions = [list(num_string) for _ in range(len(num_string))]
    for index in range(len(num_string)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(list(transposition))) for transposition in num_transpositions
    )


if __name__ == "__main__":
    __import__("doctest").testmod()
| 122 |
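# --- Editor's illustration (not part of the original file): removing one digit from
# 2736 yields 736, 236, 276, or 273, so the maximum is 736. The function name was
# chosen during cleanup; the original name was lost in transit.
assert max_digit_removal(2736) == 736
assert max_digit_removal(-17) == 7  # the sign is dropped via abs()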
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to printable unicode strings for BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
lowerCAmelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCamelCase )) + [1]
return [1] + ([0] * len(_UpperCamelCase )) + [1, 1] + ([0] * len(_UpperCamelCase )) + [1]
def UpperCamelCase__ ( self , token_ids_a , token_ids_b = None ):
"""simple docstring"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
def UpperCamelCase__ ( self , text , is_split_into_words=False , **kwargs ):
"""simple docstring"""
add_prefix_space = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
text = ' ' + text
return (text, kwargs)
def UpperCamelCase__ ( self , encoded_inputs , max_length = None , padding_strategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of = None , return_attention_mask = None , ):
"""simple docstring"""
encoded_inputs = super()._pad(
encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
# Load from model defaults
if return_attention_mask is None:
return_attention_mask = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
required_input = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
needs_to_be_padded = len(encoded_inputs['global_attention_mask'] ) != len(required_input )
if needs_to_be_padded:
difference = len(required_input ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
encoded_inputs['global_attention_mask'] = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
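# --- Added illustration (hedged; not part of the original file). The `_pad` override
# above extends `global_attention_mask` with -1: 0 already means "local attention" and
# 1 means "global attention", so -1 is the only free value left to mark padding. The
# helper name below is hypothetical and exists only for this demonstration.
def _demo_pad_global_attention_mask(mask, target_len, padding_side="right"):
    difference = target_len - len(mask)
    if padding_side == "right":
        return mask + [-1] * difference
    return [-1] * difference + mask

assert _demo_pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert _demo_pad_global_attention_mask([1, 0, 0], 5, "left") == [-1, -1, 1, 0, 0]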
| 122 | 1 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__UpperCAmelCase : int = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks( args) -> int:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts)
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config)
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights)
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings( student , args) -> Union[str, Any]:
if args.student_type == "roberta":
student.roberta.embeddings.position_embeddings.weight.requires_grad = False
elif args.student_type == "gpt2":
student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings( student , args) -> Union[str, Any]:
if args.student_type == "roberta":
student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main( ) -> Tuple:
parser = argparse.ArgumentParser(description="""Training""")
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""")
parser.add_argument(
"""--dump_path""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The output directory (log, checkpoints, parameters, etc.)""")
parser.add_argument(
"""--data_file""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=SCREAMING_SNAKE_CASE__ , choices=["""distilbert""", """roberta""", """gpt2"""] , required=SCREAMING_SNAKE_CASE__ , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""Path to the student configuration.""")
parser.add_argument(
"""--student_pretrained_weights""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="""Load student initialization checkpoint.""")
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=SCREAMING_SNAKE_CASE__ , help="""Teacher type (BERT, RoBERTa).""")
parser.add_argument("""--teacher_name""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The teacher model.""")
parser.add_argument("""--temperature""" , default=2.0 , type=SCREAMING_SNAKE_CASE__ , help="""Temperature for the softmax temperature.""")
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the distillation loss. Must be >=0.""")
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the CLM loss. Must be >=0.""")
parser.add_argument("""--alpha_mse""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight of the MSE loss. Must be >=0.""")
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight of the cosine embedding loss. Must be >=0.""")
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""")
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to mask out.""")
parser.add_argument("""--word_keep""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to keep.""")
parser.add_argument("""--word_rand""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to randomly replace.""")
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=SCREAMING_SNAKE_CASE__ , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=SCREAMING_SNAKE_CASE__ , help="""The token counts in the data_file for MLM.""")
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=SCREAMING_SNAKE_CASE__ , default=3 , help="""Number of pass on the whole dataset.""")
parser.add_argument("""--batch_size""" , type=SCREAMING_SNAKE_CASE__ , default=5 , help="""Batch size (for each process).""")
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=SCREAMING_SNAKE_CASE__ , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=SCREAMING_SNAKE_CASE__ , help="""Linear warmup proportion.""")
parser.add_argument("""--weight_decay""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Weight decay if we apply some.""")
parser.add_argument("""--learning_rate""" , default=5e-4 , type=SCREAMING_SNAKE_CASE__ , help="""The initial learning rate for Adam.""")
parser.add_argument("""--adam_epsilon""" , default=1e-6 , type=SCREAMING_SNAKE_CASE__ , help="""Epsilon for Adam optimizer.""")
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=SCREAMING_SNAKE_CASE__ , help="""Max gradient norm.""")
parser.add_argument("""--initializer_range""" , default=0.02 , type=SCREAMING_SNAKE_CASE__ , help="""Random initialization range.""")
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=SCREAMING_SNAKE_CASE__ , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=SCREAMING_SNAKE_CASE__ , default=1 , help="""Number of GPUs in the node.""")
parser.add_argument("""--local_rank""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , help="""Distributed training - Local rank""")
parser.add_argument("""--seed""" , type=SCREAMING_SNAKE_CASE__ , default=56 , help="""Random seed""")
parser.add_argument("""--log_interval""" , type=SCREAMING_SNAKE_CASE__ , default=500 , help="""Tensorboard logging interval.""")
parser.add_argument("""--checkpoint_interval""" , type=SCREAMING_SNAKE_CASE__ , default=4000 , help="""Checkpoint interval.""")
args = parser.parse_args()
sanity_checks(args)
# ARGS #
init_gpu_params(args)
set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
""" it. Use `--force` if you want to overwrite it.""")
else:
shutil.rmtree(args.dump_path)
if not os.path.exists(args.dump_path):
os.makedirs(args.dump_path)
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''')
# SAVE PARAMS #
logger.info(F'''Param: {args}''')
with open(os.path.join(args.dump_path , """parameters.json""") , """w""") as f:
json.dump(vars(args) , f , indent=4)
git_log(args.dump_path)
student_config_class , student_model_class , _ = MODEL_CLASSES[args.student_type]
teacher_config_class , teacher_model_class , teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
special_tok_ids = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
idx = tokenizer.all_special_tokens.index(tok_symbol)
special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''')
args.special_tok_ids = special_tok_ids
args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file , """rb""") as fp:
data = pickle.load(fp)
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''')
with open(args.token_counts , """rb""") as fp:
counts = pickle.load(fp)
token_probs = np.maximum(counts , 1) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
token_probs[idx] = 0.0 # do not predict special tokens
token_probs = torch.from_numpy(token_probs)
else:
token_probs = None
train_lm_seq_dataset = LmSeqsDataset(params=args , data=data)
logger.info("""Data loader created.""")
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''')
stu_architecture_config = student_config_class.from_pretrained(args.student_config)
stu_architecture_config.output_hidden_states = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''')
student = student_model_class.from_pretrained(args.student_pretrained_weights , config=stu_architecture_config)
else:
student = student_model_class(stu_architecture_config)
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''')
logger.info("""Student loaded.""")
# TEACHER #
teacher = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=True)
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''')
logger.info(F'''Teacher loaded from {args.teacher_name}.''')
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(student , args)
if args.freeze_token_type_embds:
freeze_token_type_embeddings(student , args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
distiller = Distiller(
params=args , dataset=train_lm_seq_dataset , token_probs=token_probs , student=student , teacher=teacher)
distiller.train()
logger.info("""Let's go get some drinks.""")
if __name__ == "__main__":
main()
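# --- Added usage note (hedged; not part of the original file). An illustrative invocation
# of the distillation script above; every path and checkpoint name is a placeholder, and
# --alpha_clm is set to 0.0 because sanity_checks() requires alpha_clm == 0 when --mlm is used.
#
#   python train.py \
#     --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0 \
#     --data_file data/binarized_text.bert-base-uncased.pickle \
#     --token_counts data/token_counts.bert-base-uncased.pickle \
#     --dump_path serialization_dir/my_first_distillation --force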
| 111 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput ( ModelOutput ):
'''simple docstring'''
projection_state: Optional[torch.FloatTensor] = None
last_hidden_state: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig ( XLMRobertaConfig ):
'''simple docstring'''
def __init__( self : Any , pad_token_id : List[str]=1 , bos_token_id : str=0 , eos_token_id : List[Any]=2 , project_dim : Union[str, Any]=512 , pooler_fn : Tuple="cls" , learn_encoder : Union[str, Any]=False , use_attention_mask : Optional[Any]=True , **kwargs : Optional[int] , ):
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.project_dim = project_dim
self.pooler_fn = pooler_fn
self.learn_encoder = learn_encoder
self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation ( RobertaPreTrainedModel ):
'''simple docstring'''
_keys_to_ignore_on_load_unexpected = [R"""pooler""", R"""logit_scale"""]
_keys_to_ignore_on_load_missing = [R"""position_ids""", R"""predictions.decoder.bias"""]
base_model_prefix = """roberta"""
config_class = RobertaSeriesConfig
def __init__( self : Dict , config : Dict ):
super().__init__(config )
self.base_model = XLMRobertaModel(config )
self.transformation = nn.Linear(config.hidden_size , config.project_dim )
self.has_pre_transformation = getattr(config , """has_pre_transformation""" , False )
if self.has_pre_transformation:
self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def UpperCAmelCase__ ( self : Optional[Any] , input_ids : Optional[torch.Tensor] = None , attention_mask : Optional[torch.Tensor] = None , token_type_ids : Optional[torch.Tensor] = None , position_ids : Optional[torch.Tensor] = None , head_mask : Optional[torch.Tensor] = None , inputs_embeds : Optional[torch.Tensor] = None , encoder_hidden_states : Optional[torch.Tensor] = None , encoder_attention_mask : Optional[torch.Tensor] = None , output_attentions : Optional[bool] = None , output_hidden_states : Optional[bool] = None , return_dict : Optional[bool] = None , ):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.base_model(
input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
if self.has_pre_transformation:
sequence_output = outputs["""hidden_states"""][-2]
sequence_output = self.pre_LN(sequence_output )
projection_state = self.transformation_pre(sequence_output )
return TransformationModelOutput(
projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
projection_state = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
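# --- Added usage sketch (hedged; not part of the original file). Upstream this module is
# AltDiffusion's RobertaSeriesModelWithTransformation; the checkpoint and the `tokens`
# variable below are assumptions used only for illustration.
#
#   text_encoder = RobertaSeriesModelWithTransformation.from_pretrained(
#       "BAAI/AltDiffusion", subfolder="text_encoder")
#   out = text_encoder(input_ids=tokens.input_ids, attention_mask=tokens.attention_mask)
#   text_embeddings = out.projection_state  # conditioning fed to the diffusion UNet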
| 111 | 1 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 266 |
def lowerCAmelCase_ ( _lowercase : int) -> int:
"""simple docstring"""
if not isinstance(_lowercase , int):
raise TypeError("""only integers accepted as input""")
else:
num_string = str(abs(_lowercase))
num_transpositions = [list(num_string) for char in range(len(num_string))]
for index in range(len(num_string)):
num_transpositions[index].pop(index)
return max(
int("""""".join(list(transposition))) for transposition in num_transpositions)
if __name__ == "__main__":
__import__("doctest").testmod()
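# --- Added sanity check (hedged; not part of the original file) for the digit-removal
# helper above: for 261 the candidates are 61, 21 and 26, so the maximum is 61.
assert lowerCAmelCase_(261) == 61
assert lowerCAmelCase_(-124) == 24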
| 266 | 1 |
from ...processing_utils import ProcessorMixin
class WhisperProcessor ( ProcessorMixin ):
'''simple docstring'''
feature_extractor_class = """WhisperFeatureExtractor"""
tokenizer_class = """WhisperTokenizer"""
def __init__( self , feature_extractor , tokenizer) -> int:
super().__init__(feature_extractor , tokenizer)
self.current_processor = self.feature_extractor
self._in_target_context_manager = False
def UpperCamelCase__ ( self , task=None , language=None , no_timestamps=True) -> List[Any]:
return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps)
def __call__( self , *args , **kwargs) -> str:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*args , **kwargs)
audio = kwargs.pop('''audio''' , None)
sampling_rate = kwargs.pop('''sampling_rate''' , None)
text = kwargs.pop('''text''' , None)
if len(args) > 0:
audio = args[0]
args = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''')
if audio is not None:
inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs)
if text is not None:
encodings = self.tokenizer(text , **kwargs)
if text is None:
return inputs
elif audio is None:
return encodings
else:
inputs['''labels'''] = encodings['''input_ids''']
return inputs
def UpperCamelCase__ ( self , *args , **kwargs) -> Optional[Any]:
return self.tokenizer.batch_decode(*args , **kwargs)
def UpperCamelCase__ ( self , *args , **kwargs) -> Optional[Any]:
return self.tokenizer.decode(*args , **kwargs)
def UpperCamelCase__ ( self , text , return_tensors="np") -> List[Any]:
return self.tokenizer.get_prompt_ids(text , return_tensors=return_tensors)
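# --- Added usage sketch (hedged; not part of the original file). The class above mirrors
# transformers' WhisperProcessor, so the upstream API is shown; the checkpoint id is an
# assumption.
#
#   import numpy as np
#   from transformers import WhisperProcessor
#
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   speech = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
#   inputs = processor(audio=speech, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="hello world", return_tensors="pt")["input_ids"]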
| 43 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=7 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=99 , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=16 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=3 , UpperCamelCase__=4 , UpperCamelCase__=None , ) -> Union[str, Any]:
'''simple docstring'''
A_ = parent
A_ = batch_size
A_ = seq_length
A_ = is_training
A_ = use_token_type_ids
A_ = use_labels
A_ = vocab_size
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = max_position_embeddings
A_ = type_vocab_size
A_ = type_sequence_label_size
A_ = initializer_range
A_ = num_labels
A_ = num_choices
A_ = scope
A_ = self.vocab_size - 1
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ = None
if self.use_token_type_ids:
A_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ = None
A_ = None
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ = ids_tensor([self.batch_size] , self.num_choices )
A_ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
A_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = OpenAIGPTModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , head_mask=UpperCamelCase__ )
A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
A_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
A_ = OpenAIGPTLMHeadModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) -> int:
'''simple docstring'''
A_ = OpenAIGPTDoubleHeadsModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = self.num_labels
A_ = OpenAIGPTForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowercase = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowercase = (
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ) -> Union[str, Any]:
'''simple docstring'''
A_ = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
A_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase__ , )
A_ = inputs_dict["""labels"""]
A_ = inputs_dict["""labels"""]
A_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=UpperCamelCase__ , )
A_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
return inputs_dict
def snake_case_ ( self ) -> Union[str, Any]:
'''simple docstring'''
self.model_tester = OpenAIGPTModelTester(self )
self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
def snake_case_ ( self ) -> Optional[Any]:
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
@slow
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = OpenAIGPTModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@require_torch
class OPENAIGPTModelLanguageGenerationTest ( unittest.TestCase ):
@slow
def snake_case_ ( self ) -> Tuple:
'''simple docstring'''
model = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(torch_device )
input_ids = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=torch_device ) # the president is
expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
output_ids = model.generate(input_ids , do_sample=False )
self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 162 | 0 |
'''simple docstring'''
def lowerCamelCase (number : int ):
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
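# --- Added illustration (hedged; not part of the original file). In Python `&` binds
# tighter than `==`, so the body parses as `(number & 1) == 0`: "is the lowest bit clear".
assert lowerCamelCase(4) is True
assert lowerCamelCase(7) is False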
| 294 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowercase : Dict = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, __lowercase)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_SCREAMING_SNAKE_CASE )
def lowerCamelCase (_SCREAMING_SNAKE_CASE : List[str] ):
from transformers.testing_utils import pytest_terminal_summary_main
make_reports = terminalreporter.config.getoption('--make-reports' )
if make_reports:
pytest_terminal_summary_main(terminalreporter , id=make_reports )
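# --- Added usage note (hedged; not part of the original file). pytest picks up these
# hooks automatically; the option they register is used like:
#
#   python -m pytest tests/ --make-reports=run_name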
| 294 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class __a :
'''simple docstring'''
def __init__( self , a=2 , b=3 , length=64 , seed=None ) -> Optional[int]:
"""simple docstring"""
rng = np.random.default_rng(seed )
self.length = length
self.x = rng.normal(size=(length,) ).astype(np.float32 )
self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )
def __len__( self ) -> Union[str, Any]:
"""simple docstring"""
return self.length
def __getitem__( self , _a ) -> Any:
"""simple docstring"""
return {"x": self.x[i], "y": self.y[i]}
class __a (torch.nn.Module):
'''simple docstring'''
def __init__( self , a=0 , b=0 , double_output=False ) -> Any:
"""simple docstring"""
super().__init__()
self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
self.first_batch = True
def _a ( self , x=None ) -> Optional[int]:
"""simple docstring"""
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
self.first_batch = False
return x * self.a[0] + self.b[0]
class __a (torch.nn.Module):
'''simple docstring'''
def __init__( self , a=0 , b=0 , double_output=False ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
self.a = torch.nn.Parameter(torch.tensor(a ).float() )
self.b = torch.nn.Parameter(torch.tensor(b ).float() )
self.first_batch = True
def _a ( self , x=None ) -> int:
"""simple docstring"""
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
self.first_batch = False
return x * self.a + self.b
def _lowercase ( accelerator , batch_size = 16 ) -> Optional[Any]:
from datasets import load_dataset
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
datasets = load_dataset("""csv""" , data_files=data_files )
label_list = datasets["train"].unique("""label""" )
label_to_id = {v: i for i, v in enumerate(label_list )}
def tokenize_function(examples ):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None , padding="""max_length""" )
if "label" in examples:
outputs["""labels"""] = [label_to_id[l] for l in examples["label"]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
tokenized_datasets = datasets.map(
tokenize_function , batched=True , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(examples ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(examples , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
train_dataloader = DataLoader(tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
eval_dataloader = DataLoader(tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
return train_dataloader, eval_dataloader
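# --- Added usage sketch (hedged; not part of the original file). How the helpers above
# are typically wired together inside an accelerate test; `Accelerator` comes from the
# calling test, and `_lowercase` is the dataloader factory defined above.
#
#   from accelerate import Accelerator
#
#   accelerator = Accelerator()
#   train_dataloader, eval_dataloader = _lowercase(accelerator, batch_size=16)
#   batch = next(iter(train_dataloader))  # padded MRPC features ready for a BERT model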
| 132 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ = {
'facebook/nllb-large-en-ro': 10_24,
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = NllbTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=False , **__lowerCamelCase , ) -> Tuple:
# Mask token behave like a normal word, i.e. include the space before it
_A : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
_A : Optional[int] = legacy_behaviour
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , legacy_behaviour=__lowerCamelCase , **__lowerCamelCase , )
_A : int = vocab_file
_A : Optional[Any] = False if not self.vocab_file else True
_A : Tuple = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
self.lang_code_to_id = {
lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
self._src_lang = src_lang if src_lang is not None else "eng_Latn"
self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
self.tgt_lang = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , new_src_lang) -> None:
self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : Tuple = [self.sep_token_id]
_A : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : List[Any] = src_lang
_A : Optional[int] = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Tuple = tgt_lang_id
return inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "eng_Latn" , __lowerCamelCase = None , __lowerCamelCase = "fra_Latn" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Tuple = src_lang
_A : int = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> str:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[str]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = self.convert_tokens_to_ids(__lowerCamelCase)
if self.legacy_behaviour:
_A : List[str] = []
_A : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_A : Tuple = [self.cur_lang_code]
_A : Optional[Any] = [self.eos_token_id]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[Any] = self.convert_tokens_to_ids(__lowerCamelCase)
if self.legacy_behaviour:
_A : Tuple = []
_A : Any = [self.eos_token_id, self.cur_lang_code]
else:
_A : Union[str, Any] = [self.cur_lang_code]
_A : str = [self.eos_token_id]
_A : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : Dict = self.convert_ids_to_tokens(self.suffix_tokens)
_A : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
out_vocab_file = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file , out_vocab_file)
return (out_vocab_file,)
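# --- Added usage sketch (hedged; not part of the original file). The class above mirrors
# transformers' NllbTokenizerFast, so the upstream name is used here.
#
#   from transformers import NllbTokenizerFast
#
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn")
#   batch = tokenizer("Hello world", text_target="Bonjour le monde", return_tensors="pt")
#   # with legacy_behaviour the source ids end with [..., </s>, eng_Latn];
#   # otherwise eng_Latn is prepended and </s> closes the sequence.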
| 11 | 0 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _a ( UpperCAmelCase__ ):
_lowercase : Any = ['vqvae']
def __init__( self: Union[str, Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: str , ) -> Any:
"""simple docstring"""
super().__init__()
self.register_modules(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , mel=_SCREAMING_SNAKE_CASE , vqvae=_SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self: Tuple ) -> int:
"""simple docstring"""
return 50 if isinstance(self.scheduler , _SCREAMING_SNAKE_CASE ) else 1_000
@torch.no_grad()
def __call__( self: Union[str, Any] , UpperCamelCase_: int = 1 , UpperCamelCase_: List[str] = None , UpperCamelCase_: Dict = None , UpperCamelCase_: Optional[Any] = 0 , UpperCamelCase_: Optional[Any] = 0 , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Any = None , UpperCamelCase_: Optional[int] = 0 , UpperCamelCase_: Any = 0 , UpperCamelCase_: Optional[Any] = None , UpperCamelCase_: Dict = 0 , UpperCamelCase_: List[str] = None , UpperCamelCase_: Any = None , UpperCamelCase_: int=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
lowercase__ = steps or self.get_default_steps()
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
lowercase__ = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
lowercase__ = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowercase__ = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_SCREAMING_SNAKE_CASE , device=self.device , )
lowercase__ = noise
lowercase__ = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ = self.mel.audio_slice_to_image(_SCREAMING_SNAKE_CASE )
lowercase__ = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape(
(input_image.height, input_image.width) )
lowercase__ = (input_image / 255) * 2 - 1
lowercase__ = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
lowercase__ = self.vqvae.encode(torch.unsqueeze(_SCREAMING_SNAKE_CASE , 0 ) ).latent_dist.sample(
generator=_SCREAMING_SNAKE_CASE )[0]
lowercase__ = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowercase__ = self.scheduler.add_noise(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.scheduler.timesteps[start_step - 1] )
lowercase__ = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowercase__ = int(mask_start_secs * pixels_per_second )
lowercase__ = int(mask_end_secs * pixels_per_second )
lowercase__ = self.scheduler.add_noise(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , _SCREAMING_SNAKE_CASE ):
lowercase__ = self.unet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )["""sample"""]
else:
lowercase__ = self.unet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )["""sample"""]
if isinstance(self.scheduler , _SCREAMING_SNAKE_CASE ):
lowercase__ = self.scheduler.step(
model_output=_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , sample=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , )["""prev_sample"""]
else:
lowercase__ = self.scheduler.step(
model_output=_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , sample=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , )["""prev_sample"""]
if mask is not None:
if mask_start > 0:
lowercase__ = mask[:, step, :, :mask_start]
if mask_end > 0:
lowercase__ = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
lowercase__ = 1 / self.vqvae.config.scaling_factor * images
lowercase__ = self.vqvae.decode(_SCREAMING_SNAKE_CASE )["""sample"""]
lowercase__ = (images / 2 + 0.5).clamp(0 , 1 )
lowercase__ = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
lowercase__ = (images * 255).round().astype('''uint8''' )
lowercase__ = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_SCREAMING_SNAKE_CASE , mode='''RGB''' ).convert('''L''' ) for _ in images) )
lowercase__ = [self.mel.image_to_audio(_SCREAMING_SNAKE_CASE ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_SCREAMING_SNAKE_CASE )[:, np.newaxis, :] ) , **ImagePipelineOutput(_SCREAMING_SNAKE_CASE ) )
@torch.no_grad()
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Union[str, Any] = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler , _SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
lowercase__ = np.array(
[np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] )
lowercase__ = (sample / 255) * 2 - 1
lowercase__ = torch.Tensor(_SCREAMING_SNAKE_CASE ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
lowercase__ = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowercase__ = self.scheduler.alphas_cumprod[t]
lowercase__ = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowercase__ = 1 - alpha_prod_t
lowercase__ = self.unet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )["""sample"""]
lowercase__ = (1 - alpha_prod_t_prev) ** 0.5 * model_output
lowercase__ = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowercase__ = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: List[Any] , UpperCamelCase_: int , UpperCamelCase_: Any ) -> torch.Tensor:
"""simple docstring"""
lowercase__ = acos(torch.dot(torch.flatten(_SCREAMING_SNAKE_CASE ) , torch.flatten(_SCREAMING_SNAKE_CASE ) ) / torch.norm(_SCREAMING_SNAKE_CASE ) / torch.norm(_SCREAMING_SNAKE_CASE ) )
return sin((1 - alpha) * theta ) * xa / sin(_SCREAMING_SNAKE_CASE ) + sin(alpha * theta ) * xa / sin(_SCREAMING_SNAKE_CASE )
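# --- Added usage sketch (hedged; not part of the original file). Upstream this is
# diffusers' AudioDiffusionPipeline; the checkpoint id below is an assumption.
#
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
#   output = pipe(batch_size=1)
#   mel_image = output.images[0]              # generated mel spectrogram as a PIL image
#   sample_rate = pipe.mel.get_sample_rate()
#   audio = output.audios[0]                  # waveform reconstructed from the spectrogram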
| 360 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_doc_toc( doc_list ):
"""simple docstring"""
counts = defaultdict(int )
overview_doc = []
new_doc_list = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
else:
new_doc_list.append(doc )
doc_list = new_doc_list
duplicates = [key for key, value in counts.items() if value > 1]
new_doc = []
for duplicate_key in duplicates:
titles = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
if len(titles ) > 1:
raise ValueError(
f'{duplicate_key} is present several times in the documentation table of content at '
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
new_doc = sorted(new_doc , key=lambda s : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(overview_doc ) > 1:
raise ValueError('''{doc_list} has two \'overview\' docs which is not allowed.''' )
overview_doc.extend(new_doc )
# Sort
return overview_doc
def check_scheduler_doc( overwrite=False ):
"""simple docstring"""
with open(PATH_TO_TOC , encoding='''utf-8''' ) as f:
content = yaml.safe_load(f.read() )
# Get to the API doc
api_idx = 0
while content[api_idx]["title"] != "API":
api_idx += 1
api_doc = content[api_idx]['''sections''']
# Then to the model doc
scheduler_idx = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
scheduler_doc = api_doc[scheduler_idx]['''sections''']
new_scheduler_doc = clean_doc_toc(scheduler_doc )
diff = False
if new_scheduler_doc != scheduler_doc:
diff = True
if overwrite:
api_doc[scheduler_idx]['''sections'''] = new_scheduler_doc
if diff:
if overwrite:
content[api_idx]['''sections'''] = api_doc
with open(PATH_TO_TOC , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(content , allow_unicode=True ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
def check_pipeline_doc( overwrite=False ):
"""simple docstring"""
with open(PATH_TO_TOC , encoding='''utf-8''' ) as f:
content = yaml.safe_load(f.read() )
# Get to the API doc
api_idx = 0
while content[api_idx]["title"] != "API":
api_idx += 1
api_doc = content[api_idx]['''sections''']
# Then to the model doc
pipeline_idx = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
diff = False
pipeline_docs = api_doc[pipeline_idx]['''sections''']
new_pipeline_docs = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
sub_pipeline_doc = pipeline_doc['''section''']
new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
if overwrite:
pipeline_doc['''section'''] = new_sub_pipeline_doc
new_pipeline_docs.append(pipeline_doc )
# sort overall pipeline doc
new_pipeline_docs = clean_doc_toc(new_pipeline_docs )
if new_pipeline_docs != pipeline_docs:
diff = True
if overwrite:
api_doc[pipeline_idx]['''sections'''] = new_pipeline_docs
if diff:
if overwrite:
content[api_idx]['''sections'''] = api_doc
with open(PATH_TO_TOC , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(content , allow_unicode=True ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
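# --- Added usage note (hedged; not part of the original file). Illustrative invocations,
# assuming the script lives at utils/check_doc_toc.py as in the diffusers repository:
#
#   python utils/check_doc_toc.py                      # check only, raise on bad ordering
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite _toctree.yml in place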
| 93 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class snake_case :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str =field(
metadata={"help": "The output directory where the model will be written."} , )
SCREAMING_SNAKE_CASE_ : str =field(
metadata={
"help": (
"The encoder model checkpoint for weights initialization."
"Don't set if you want to train an encoder model from scratch."
)
} , )
SCREAMING_SNAKE_CASE_ : str =field(
metadata={
"help": (
"The decoder model checkpoint for weights initialization."
"Don't set if you want to train a decoder model from scratch."
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__lowerCamelCase , metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} )
SCREAMING_SNAKE_CASE_ : Optional[str] =field(
default=__lowerCamelCase , metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} )
def main( ) -> Any:
"""simple docstring"""
parser = HfArgumentParser((ModelArguments,) )
(model_args ,) = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=encoder_config , decoder_config=decoder_config , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
decoder_start_token_id = decoder_config.decoder_start_token_id
pad_token_id = decoder_config.pad_token_id
if decoder_start_token_id is None:
decoder_start_token_id = decoder_config.bos_token_id
if pad_token_id is None:
pad_token_id = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
model.config.eos_token_id = decoder_config.eos_token_id
model.config.decoder_start_token_id = decoder_start_token_id
model.config.pad_token_id = pad_token_id
image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
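# Example invocation (the script filename and the two checkpoints are
# hypothetical; the flag names follow the ModelArguments fields used above):
#
#   python create_flax_vision_encoder_decoder.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2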
| 53 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase : Optional[int] = {"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
__lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 52 | 0 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
class __UpperCAmelCase ( lowerCamelCase__ ):
def __magic_name__ ( self : str, __A : Optional[int] ):
if isinstance(__A, __A ):
UpperCAmelCase : str = [label.strip() for label in labels.split(''',''' ) if label.strip()]
return labels
def __call__( self : Any, __A : Any, __A : Tuple, __A : Dict ):
if len(__A ) == 0 or len(__A ) == 0:
raise ValueError('''You must include at least one label and at least one sequence.''' )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
'''The provided hypothesis_template "{}" could not be formatted with the target labels. '''
'''Make sure the template includes formatting syntax such as {{}} where the label should go.'''
).format(__A ) )
if isinstance(__A, __A ):
UpperCAmelCase : int = [sequences]
UpperCAmelCase : Dict = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(__A )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(lowerCamelCase__ )
class __UpperCAmelCase ( lowerCamelCase__ ):
def __init__( self : Union[str, Any], __A : List[str]=ZeroShotClassificationArgumentHandler(), *__A : Optional[int], **__A : Optional[Any] ):
UpperCAmelCase : Optional[Any] = args_parser
super().__init__(*__A, **__A )
if self.entailment_id == -1:
logger.warning(
'''Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '''
'''-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.''' )
@property
def __magic_name__ ( self : Union[str, Any] ):
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith('''entail''' ):
return ind
return -1
def __magic_name__ ( self : str, __A : int, __A : List[Any]=True, __A : Any=True, __A : List[str]=TruncationStrategy.ONLY_FIRST, **__A : List[Any] ):
UpperCAmelCase : Dict = self.framework
if self.tokenizer.pad_token is None:
# Fallback for tokenizers that do not support padding
logger.error(
'''Tokenizer does not support the padding required for zero-shot classification; '''
'''falling back to `pad_token=eos_token`''' )
UpperCAmelCase : Optional[int] = self.tokenizer.eos_token
try:
UpperCAmelCase : int = self.tokenizer(
__A, add_special_tokens=__A, return_tensors=__A, padding=__A, truncation=__A, )
except Exception as e:
if "too short" in str(__A ):
# fast tokenizers may complain when asked to truncate
# to a length the input never reaches. In that case we
# retry without truncation; matching on the error message
# is the only practical way to catch this exception.
UpperCAmelCase : Dict = self.tokenizer(
__A, add_special_tokens=__A, return_tensors=__A, padding=__A, truncation=TruncationStrategy.DO_NOT_TRUNCATE, )
else:
raise e
return inputs
def __magic_name__ ( self : str, **__A : int ):
if kwargs.get('''multi_class''', __A ) is not None:
UpperCAmelCase : Any = kwargs['''multi_class''']
logger.warning(
'''The `multi_class` argument has been deprecated and renamed to `multi_label`. '''
'''`multi_class` will be removed in a future version of Transformers.''' )
UpperCAmelCase : Tuple = {}
if "candidate_labels" in kwargs:
UpperCAmelCase : List[str] = self._args_parser._parse_labels(kwargs['''candidate_labels'''] )
if "hypothesis_template" in kwargs:
UpperCAmelCase : int = kwargs['''hypothesis_template''']
UpperCAmelCase : int = {}
if "multi_label" in kwargs:
UpperCAmelCase : List[str] = kwargs['''multi_label''']
return preprocess_params, {}, postprocess_params
def __call__( self : List[Any], __A : Union[str, List[str]], *__A : Tuple, **__A : Union[str, Any], ):
if len(__A ) == 0:
pass
elif len(__A ) == 1 and "candidate_labels" not in kwargs:
UpperCAmelCase : int = args[0]
else:
raise ValueError(F'''Unable to understand extra arguments {args}''' )
return super().__call__(__A, **__A )
def __magic_name__ ( self : List[Any], __A : Tuple, __A : Optional[Any]=None, __A : List[str]="This example is {}." ):
UpperCAmelCase : List[Any] = self._args_parser(__A, __A, __A )
for i, (candidate_label, sequence_pair) in enumerate(zip(__A, __A ) ):
UpperCAmelCase : Dict = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(__A ) - 1,
**model_input,
}
def __magic_name__ ( self : Dict, __A : Dict ):
UpperCAmelCase : str = inputs['''candidate_label''']
UpperCAmelCase : List[Any] = inputs['''sequence''']
UpperCAmelCase : int = {k: inputs[k] for k in self.tokenizer.model_input_names}
UpperCAmelCase : List[Any] = self.model(**__A )
UpperCAmelCase : List[str] = {
'''candidate_label''': candidate_label,
'''sequence''': sequence,
'''is_last''': inputs['''is_last'''],
**outputs,
}
return model_outputs
def __magic_name__ ( self : List[str], __A : Optional[Any], __A : int=False ):
UpperCAmelCase : Union[str, Any] = [outputs['''candidate_label'''] for outputs in model_outputs]
UpperCAmelCase : Union[str, Any] = [outputs['''sequence'''] for outputs in model_outputs]
UpperCAmelCase : Any = np.concatenate([output['''logits'''].numpy() for output in model_outputs] )
UpperCAmelCase : List[Any] = logits.shape[0]
UpperCAmelCase : Union[str, Any] = len(__A )
UpperCAmelCase : str = N // n
UpperCAmelCase : Dict = logits.reshape((num_sequences, n, -1) )
if multi_label or len(__A ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
UpperCAmelCase : Union[str, Any] = self.entailment_id
UpperCAmelCase : List[str] = -1 if entailment_id == 0 else 0
UpperCAmelCase : Tuple = reshaped_outputs[..., [contradiction_id, entailment_id]]
UpperCAmelCase : Dict = np.exp(__A ) / np.exp(__A ).sum(-1, keepdims=__A )
UpperCAmelCase : Dict = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
UpperCAmelCase : List[str] = reshaped_outputs[..., self.entailment_id]
UpperCAmelCase : str = np.exp(__A ) / np.exp(__A ).sum(-1, keepdims=__A )
UpperCAmelCase : Any = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
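# A self-contained sketch of the two scoring modes implemented in `postprocess`
# above, with toy logits. The label order [contradiction, neutral, entailment]
# and the numbers are assumptions for illustration; the softmax math mirrors
# the code.
import numpy as np

toy_logits = np.array([[[2.0, 0.1, 0.5], [0.2, 0.1, 1.5]]])  # (num_sequences=1, n_labels=2, 3)
entailment_id, contradiction_id = 2, 0

# multi-label: softmax over [contradiction, entailment] independently per label
pair = toy_logits[..., [contradiction_id, entailment_id]]
multi_label_scores = (np.exp(pair) / np.exp(pair).sum(-1, keepdims=True))[..., 1]

# single-label: softmax of the entailment logit across all candidate labels
ent = toy_logits[..., entailment_id]
single_label_scores = np.exp(ent) / np.exp(ent).sum(-1, keepdims=True)

print(multi_label_scores)   # per-label probabilities, need not sum to 1
print(single_label_scores)  # sums to 1 across the candidate labels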
| 368 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __UpperCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
UpperCamelCase = ShapEImgaImgPipeline
UpperCamelCase = ["""image"""]
UpperCamelCase = ["""image"""]
UpperCamelCase = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase = False
@property
def __magic_name__ ( self : Optional[Any] ):
return 3_2
@property
def __magic_name__ ( self : Optional[int] ):
return 3_2
@property
def __magic_name__ ( self : Union[str, Any] ):
return self.time_input_dim * 4
@property
def __magic_name__ ( self : Any ):
return 8
@property
def __magic_name__ ( self : List[str] ):
torch.manual_seed(0 )
UpperCAmelCase : str = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size, image_size=6_4, projection_dim=self.text_embedder_hidden_size, intermediate_size=3_7, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
UpperCAmelCase : Any = CLIPVisionModel(__A )
return model
@property
def __magic_name__ ( self : int ):
UpperCAmelCase : Optional[int] = CLIPImageProcessor(
crop_size=2_2_4, do_center_crop=__A, do_normalize=__A, do_resize=__A, image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], resample=3, size=2_2_4, )
return image_processor
@property
def __magic_name__ ( self : Dict ):
torch.manual_seed(0 )
UpperCAmelCase : Any = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_6,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 3_2,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
UpperCAmelCase : List[Any] = PriorTransformer(**__A )
return model
@property
def __magic_name__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCAmelCase : List[Any] = {
'''param_shapes''': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 1_2,
'''background''': (
0.1,
0.1,
0.1,
),
}
UpperCAmelCase : List[str] = ShapERenderer(**__A )
return model
def __magic_name__ ( self : List[Any] ):
UpperCAmelCase : str = self.dummy_prior
UpperCAmelCase : List[str] = self.dummy_image_encoder
UpperCAmelCase : List[Any] = self.dummy_image_processor
UpperCAmelCase : Dict = self.dummy_renderer
UpperCAmelCase : int = HeunDiscreteScheduler(
beta_schedule='''exp''', num_train_timesteps=1_0_2_4, prediction_type='''sample''', use_karras_sigmas=__A, clip_sample=__A, clip_sample_range=1.0, )
UpperCAmelCase : List[str] = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __magic_name__ ( self : List[Any], __A : Dict, __A : List[Any]=0 ):
UpperCAmelCase : int = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(__A ) ).to(__A )
if str(__A ).startswith('''mps''' ):
UpperCAmelCase : List[str] = torch.manual_seed(__A )
else:
UpperCAmelCase : Optional[Any] = torch.Generator(device=__A ).manual_seed(__A )
UpperCAmelCase : Dict = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 3_2,
'''output_type''': '''np''',
}
return inputs
def __magic_name__ ( self : str ):
UpperCAmelCase : Dict = '''cpu'''
UpperCAmelCase : Any = self.get_dummy_components()
UpperCAmelCase : Tuple = self.pipeline_class(**__A )
UpperCAmelCase : Dict = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(__A ) )
UpperCAmelCase : Any = output.images[0]
UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
UpperCAmelCase : Optional[int] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __magic_name__ ( self : Dict ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __magic_name__ ( self : str ):
UpperCAmelCase : Tuple = torch_device == '''cpu'''
UpperCAmelCase : List[Any] = True
self._test_inference_batch_single_identical(
batch_size=2, test_max_difference=__A, relax_max_difference=__A, )
def __magic_name__ ( self : Dict ):
UpperCAmelCase : Dict = self.get_dummy_components()
UpperCAmelCase : Union[str, Any] = self.pipeline_class(**__A )
UpperCAmelCase : List[str] = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : Any = 1
UpperCAmelCase : Any = 2
UpperCAmelCase : Optional[int] = self.get_dummy_inputs(__A )
for key in inputs.keys():
if key in self.batch_params:
UpperCAmelCase : Any = batch_size * [inputs[key]]
UpperCAmelCase : int = pipe(**__A, num_images_per_prompt=__A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
def __magic_name__ ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : Tuple ):
UpperCAmelCase : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
UpperCAmelCase : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
UpperCAmelCase : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
UpperCAmelCase : int = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
UpperCAmelCase : Dict = torch.Generator(device=__A ).manual_seed(0 )
UpperCAmelCase : List[str] = pipe(
__A, generator=__A, guidance_scale=3.0, num_inference_steps=6_4, frame_size=6_4, output_type='''np''', ).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(__A, __A )
| 99 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase__ : int ) -> None:
'''simple docstring'''
_UpperCamelCase = value
_UpperCamelCase = None
_UpperCamelCase = None
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : int , lowerCAmelCase__ : Node ) -> None:
'''simple docstring'''
_UpperCamelCase = tree
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Node | None ) -> int:
'''simple docstring'''
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : Dict ) -> Iterator[int]:
'''simple docstring'''
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
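# Usage sketch, assuming the two classes above are a `Node` and a tree-sum
# iterator called `BinaryTreeNodeSum` (hypothetical names):
#
#   tree = Node(10)
#   tree.left = Node(5)
#   tree.right = Node(-3)
#   tree.left.left = Node(12)
#   assert next(iter(BinaryTreeNodeSum(tree))) == 10 + 5 - 3 + 12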
| 324 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowercase__ : Optional[Any] = logging.getLogger()
def a__ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''-f''' )
_UpperCamelCase = parser.parse_args()
return args.f
def a__ ( lowercase : Dict ) -> int:
"""simple docstring"""
_UpperCamelCase = {}
_UpperCamelCase = os.path.join(lowercase, '''all_results.json''' )
if os.path.exists(lowercase ):
with open(lowercase, '''r''' ) as f:
_UpperCamelCase = json.load(lowercase )
else:
raise ValueError(F"""can't find {path}""" )
return results
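# For reference, `get_results` expects each example script to have written an
# `all_results.json` into its output_dir; a hypothetical file might look like:
#
#   {"eval_accuracy": 0.83, "train_loss": 0.41, "perplexity": 23.7}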
def a__ ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
lowercase__ : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
@classmethod
def snake_case__ ( cls : Optional[int] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = tempfile.mkdtemp()
_UpperCamelCase = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
_UpperCamelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case__ ( cls : Tuple ) -> int:
'''simple docstring'''
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Any ) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
_UpperCamelCase = 7 if get_gpu_count() > 1 else 2
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : int ) -> int:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : List[str] ) -> int:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : str ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''translation_no_trainer''' ) ) )
@slow
def snake_case__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = logging.StreamHandler(sys.stdout )
logger.addHandler(lowerCAmelCase__ )
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.get_auto_remove_tmp_dir()
_UpperCamelCase = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
_UpperCamelCase = get_results(lowerCAmelCase__ )
# The base model scores about 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , '''image_classification_no_trainer''' ) ) )
| 324 | 1 |
lowerCAmelCase__ = {'''a''': ['''c''', '''b'''], '''b''': ['''d''', '''e'''], '''c''': [], '''d''': [], '''e''': []}
lowerCAmelCase__ = ['''a''', '''b''', '''c''', '''d''', '''e''']
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Tuple = start
# add current to visited
visited.append(lowerCamelCase__ )
lowercase__ : List[Any] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
lowercase__ : int = topological_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# if all neighbors visited add current to sort
sort.append(lowerCamelCase__ )
# if all vertices haven't been visited select a new one to visit
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
for vertice in vertices:
if vertice not in visited:
lowercase__ : Tuple = topological_sort(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# return sort
return sort
if __name__ == "__main__":
lowerCAmelCase__ = topological_sort('''a''', [], [])
print(sort)
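# Worked example: for the graph above (a -> c, b and b -> d, e), tracing the
# recursion yields sort == ['c', 'd', 'e', 'b', 'a']. Children are appended
# before their parents, so reverse the list for a sources-first ordering.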
| 355 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
lowerCAmelCase__ = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
lowerCAmelCase__ = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
lowerCAmelCase__ = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
lowerCAmelCase__ = f'''down_blocks.{i}.resnets.{j}.'''
lowerCAmelCase__ = f'''input_blocks.{3*i + j + 1}.0.'''
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
lowerCAmelCase__ = f'''down_blocks.{i}.attentions.{j}.'''
lowerCAmelCase__ = f'''input_blocks.{3*i + j + 1}.1.'''
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
lowerCAmelCase__ = f'''up_blocks.{i}.resnets.{j}.'''
lowerCAmelCase__ = f'''output_blocks.{3*i + j}.0.'''
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
lowerCAmelCase__ = f'''up_blocks.{i}.attentions.{j}.'''
lowerCAmelCase__ = f'''output_blocks.{3*i + j}.1.'''
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
lowerCAmelCase__ = f'''down_blocks.{i}.downsamplers.0.conv.'''
lowerCAmelCase__ = f'''input_blocks.{3*(i+1)}.0.op.'''
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
lowerCAmelCase__ = f'''up_blocks.{i}.upsamplers.0.'''
lowerCAmelCase__ = f'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
lowerCAmelCase__ = '''mid_block.attentions.0.'''
lowerCAmelCase__ = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
lowerCAmelCase__ = f'''mid_block.resnets.{j}.'''
lowerCAmelCase__ = f'''middle_block.{2*j}.'''
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : List[str] = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
lowercase__ : str = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
lowercase__ : List[str] = v.replace(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : str = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
lowercase__ : int = v.replace(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Optional[Any] = v
lowercase__ : Union[str, Any] = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
lowerCAmelCase__ = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
lowerCAmelCase__ = f'''encoder.down_blocks.{i}.resnets.{j}.'''
lowerCAmelCase__ = f'''encoder.down.{i}.block.{j}.'''
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
lowerCAmelCase__ = f'''down_blocks.{i}.downsamplers.0.'''
lowerCAmelCase__ = f'''down.{i}.downsample.'''
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
lowerCAmelCase__ = f'''up_blocks.{i}.upsamplers.0.'''
lowerCAmelCase__ = f'''up.{3-i}.upsample.'''
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
lowerCAmelCase__ = f'''decoder.up_blocks.{i}.resnets.{j}.'''
lowerCAmelCase__ = f'''decoder.up.{3-i}.block.{j}.'''
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
lowerCAmelCase__ = f'''mid_block.resnets.{i}.'''
lowerCAmelCase__ = f'''mid.block_{i+1}.'''
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
lowerCAmelCase__ = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
return w.reshape(*w.shape , 1 , 1 )
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : str = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
lowercase__ : Optional[int] = v.replace(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Dict = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
lowercase__ : List[str] = v.replace(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : int = v
lowercase__ : Union[str, Any] = {v: vae_state_dict[k] for k, v in mapping.items()}
lowercase__ : Optional[int] = ["q", "k", "v", "proj_out"]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if F"""mid.attn_1.{weight_name}.weight""" in k:
print(F"""Reshaping {k} for SD format""" )
lowercase__ : Dict = reshape_weight_for_sd(lowerCamelCase__ )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
lowerCAmelCase__ = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
lowerCAmelCase__ = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowerCAmelCase__ = re.compile('''|'''.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowerCAmelCase__ = {'''q''': 0, '''k''': 1, '''v''': 2}
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Union[str, Any] = {}
lowercase__ : List[Any] = {}
lowercase__ : List[Any] = {}
for k, v in text_enc_dict.items():
if (
k.endswith(".self_attn.q_proj.weight" )
or k.endswith(".self_attn.k_proj.weight" )
or k.endswith(".self_attn.v_proj.weight" )
):
lowercase__ : int = k[: -len(".q_proj.weight" )]
lowercase__ : Optional[Any] = k[-len("q_proj.weight" )]
if k_pre not in capture_qkv_weight:
lowercase__ : Dict = [None, None, None]
lowercase__ : Any = v
continue
if (
k.endswith(".self_attn.q_proj.bias" )
or k.endswith(".self_attn.k_proj.bias" )
or k.endswith(".self_attn.v_proj.bias" )
):
lowercase__ : Optional[int] = k[: -len(".q_proj.bias" )]
lowercase__ : Any = k[-len("q_proj.bias" )]
if k_pre not in capture_qkv_bias:
lowercase__ : str = [None, None, None]
lowercase__ : str = v
continue
lowercase__ : Union[str, Any] = textenc_pattern.sub(lambda lowerCamelCase__ : protected[re.escape(m.group(0 ) )] , lowerCamelCase__ )
lowercase__ : List[Any] = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
lowercase__ : str = textenc_pattern.sub(lambda lowerCamelCase__ : protected[re.escape(m.group(0 ) )] , lowerCamelCase__ )
lowercase__ : Any = torch.cat(lowerCamelCase__ )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
lowercase__ : List[str] = textenc_pattern.sub(lambda lowerCamelCase__ : protected[re.escape(m.group(0 ) )] , lowerCamelCase__ )
lowercase__ : Tuple = torch.cat(lowerCamelCase__ )
return new_state_dict
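# Note: OpenCLIP (used by SD v2 text encoders) stores each attention layer's
# q/k/v projections as one fused in_proj tensor, which is why the separately
# captured q, k and v weights above are concatenated back together.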
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
return text_enc_dict
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
)
lowerCAmelCase__ = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowerCAmelCase__ = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
lowerCAmelCase__ = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
lowerCAmelCase__ = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
lowerCAmelCase__ = load_file(unet_path, device='''cpu''')
else:
lowerCAmelCase__ = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
lowerCAmelCase__ = torch.load(unet_path, map_location='''cpu''')
if osp.exists(vae_path):
lowerCAmelCase__ = load_file(vae_path, device='''cpu''')
else:
lowerCAmelCase__ = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
lowerCAmelCase__ = torch.load(vae_path, map_location='''cpu''')
if osp.exists(text_enc_path):
lowerCAmelCase__ = load_file(text_enc_path, device='''cpu''')
else:
lowerCAmelCase__ = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
lowerCAmelCase__ = torch.load(text_enc_path, map_location='''cpu''')
# Convert the UNet model
lowerCAmelCase__ = convert_unet_state_dict(unet_state_dict)
lowerCAmelCase__ = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCAmelCase__ = convert_vae_state_dict(vae_state_dict)
lowerCAmelCase__ = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCAmelCase__ = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCAmelCase__ = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
lowerCAmelCase__ = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCAmelCase__ = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
else:
lowerCAmelCase__ = convert_text_enc_state_dict(text_enc_dict)
lowerCAmelCase__ = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCAmelCase__ = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCAmelCase__ = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCAmelCase__ = {'''state_dict''': state_dict}
torch.save(state_dict, args.checkpoint_path)
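# Example invocation (script filename and paths are hypothetical; the flags are
# the ones defined by the argparse setup above):
#
#   python convert_diffusers_to_sd.py \
#       --model_path ./my-diffusers-model \
#       --checkpoint_path ./model.safetensors \
#       --half --use_safetensors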
| 121 | 0 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class __A :
def __init__( self : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str=13 , UpperCAmelCase_ : Any=7 , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Dict=99 , UpperCAmelCase_ : List[str]=[1, 1, 2] , UpperCAmelCase_ : List[Any]=1 , UpperCAmelCase_ : Dict=32 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : str=8 , UpperCAmelCase_ : Dict=37 , UpperCAmelCase_ : Tuple="gelu_new" , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : Optional[int]=512 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Dict=None , UpperCAmelCase_ : Tuple=False , ):
lowerCAmelCase : str = parent
lowerCAmelCase : Tuple = batch_size
lowerCAmelCase : Optional[int] = seq_length
lowerCAmelCase : Dict = is_training
lowerCAmelCase : Any = use_input_mask
lowerCAmelCase : List[str] = use_token_type_ids
lowerCAmelCase : List[Any] = use_labels
lowerCAmelCase : Optional[int] = vocab_size
lowerCAmelCase : str = block_sizes
lowerCAmelCase : Optional[int] = num_decoder_layers
lowerCAmelCase : Optional[Any] = d_model
lowerCAmelCase : Tuple = n_head
lowerCAmelCase : Optional[int] = d_head
lowerCAmelCase : Dict = d_inner
lowerCAmelCase : Optional[Any] = hidden_act
lowerCAmelCase : Dict = hidden_dropout
lowerCAmelCase : Optional[Any] = attention_dropout
lowerCAmelCase : Tuple = activation_dropout
lowerCAmelCase : Optional[int] = max_position_embeddings
lowerCAmelCase : int = type_vocab_size
lowerCAmelCase : Optional[Any] = 2
lowerCAmelCase : List[Any] = num_labels
lowerCAmelCase : List[Any] = num_choices
lowerCAmelCase : Optional[int] = scope
lowerCAmelCase : str = initializer_std
# Used in the tests to check the size of the first attention layer
lowerCAmelCase : int = n_head
# Used in the tests to check the size of the first hidden state
lowerCAmelCase : Dict = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowerCAmelCase : int = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowerCAmelCase : Any = self.num_hidden_layers + 2
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Dict = None
if self.use_input_mask:
lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : int = None
if self.use_token_type_ids:
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : Optional[Any] = None
lowerCAmelCase : List[str] = None
lowerCAmelCase : Tuple = None
if self.use_labels:
lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : Dict = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowercase__ ( self : Any , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , ):
lowerCAmelCase : List[Any] = TFFunnelModel(config=_UpperCamelCase )
lowerCAmelCase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase : Union[str, Any] = model(_UpperCamelCase )
lowerCAmelCase : str = [input_ids, input_mask]
lowerCAmelCase : Optional[int] = model(_UpperCamelCase )
lowerCAmelCase : str = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : Dict = TFFunnelModel(config=_UpperCamelCase )
lowerCAmelCase : str = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase : Any = False
lowerCAmelCase : Dict = TFFunnelModel(config=_UpperCamelCase )
lowerCAmelCase : Tuple = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def lowercase__ ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , ):
lowerCAmelCase : List[Any] = TFFunnelBaseModel(config=_UpperCamelCase )
lowerCAmelCase : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase : List[str] = model(_UpperCamelCase )
lowerCAmelCase : List[str] = [input_ids, input_mask]
lowerCAmelCase : Tuple = model(_UpperCamelCase )
lowerCAmelCase : List[str] = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
lowerCAmelCase : List[str] = False
lowerCAmelCase : List[Any] = TFFunnelBaseModel(config=_UpperCamelCase )
lowerCAmelCase : List[str] = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
lowerCAmelCase : Any = False
lowerCAmelCase : int = TFFunnelBaseModel(config=_UpperCamelCase )
lowerCAmelCase : List[Any] = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , ):
lowerCAmelCase : Union[str, Any] = TFFunnelForPreTraining(config=_UpperCamelCase )
lowerCAmelCase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase : Dict = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , ):
lowerCAmelCase : Tuple = TFFunnelForMaskedLM(config=_UpperCamelCase )
lowerCAmelCase : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase : int = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , ):
lowerCAmelCase : List[str] = self.num_labels
lowerCAmelCase : Tuple = TFFunnelForSequenceClassification(config=_UpperCamelCase )
lowerCAmelCase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase : Union[str, Any] = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , ):
lowerCAmelCase : str = self.num_choices
lowerCAmelCase : Dict = TFFunnelForMultipleChoice(config=_UpperCamelCase )
lowerCAmelCase : Optional[int] = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Union[str, Any] = tf.tile(tf.expand_dims(_UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : List[str] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
lowerCAmelCase : Union[str, Any] = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase__ ( self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int] , ):
lowerCAmelCase : Any = self.num_labels
lowerCAmelCase : List[Any] = TFFunnelForTokenClassification(config=_UpperCamelCase )
lowerCAmelCase : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase : int = model(_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , ):
lowerCAmelCase : List[str] = TFFunnelForQuestionAnswering(config=_UpperCamelCase )
lowerCAmelCase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase : int = model(_UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
(
lowerCAmelCase,
lowerCAmelCase,
lowerCAmelCase,
lowerCAmelCase,
lowerCAmelCase,
lowerCAmelCase,
lowerCAmelCase,
) = config_and_inputs
lowerCAmelCase : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class __A ( __a , __a , unittest.TestCase ):
lowerCAmelCase_ : Dict = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ : Optional[Any] = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : List[str] = False
def lowercase__ ( self : str ):
lowerCAmelCase : Optional[int] = TFFunnelModelTester(self )
lowerCAmelCase : int = ConfigTester(self , config_class=_UpperCamelCase )
def lowercase__ ( self : List[Any] ):
self.config_tester.run_common_tests()
def lowercase__ ( self : int ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCamelCase )
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase )
def lowercase__ ( self : Union[str, Any] ):
lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
def lowercase__ ( self : Optional[Any] ):
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
@require_tf
class __A ( __a , unittest.TestCase ):
lowerCAmelCase_ : int = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : Optional[Any] = False
def lowercase__ ( self : Dict ):
lowerCAmelCase : Tuple = TFFunnelModelTester(self , base=_UpperCamelCase )
lowerCAmelCase : List[str] = ConfigTester(self , config_class=_UpperCamelCase )
def lowercase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Dict ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*_UpperCamelCase )
def lowercase__ ( self : Dict ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCamelCase )
def lowercase__ ( self : Dict ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCamelCase )
| 138 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 231 | 0 |
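# A short usage sketch (not part of the file above) showing the effect of the lazy init:
# importing the package is cheap, and the heavy modeling submodule is only loaded when one
# of its attributes is first accessed. Assumes torch and transformers are installed; the
# small config values are arbitrary choices for a quick smoke test.
from transformers.models.distilbert import DistilBertConfig, DistilBertModel

config = DistilBertConfig(n_layers=2, n_heads=2, dim=64, hidden_dim=128)
model = DistilBertModel(config)  # first access triggers the lazy import of modeling_distilbert
print(type(model).__name__)  # -> "DistilBertModel"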
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 281 |
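# A quick sketch demonstrating what the re-exports above buy you (assumes a transformers
# install that still ships `file_utils`): the legacy `transformers.file_utils` names and
# the canonical `transformers.utils` names resolve to the very same objects, so code
# written against the old module path keeps working unchanged.
from transformers.file_utils import is_torch_available as legacy_is_torch_available
from transformers.utils import is_torch_available

assert legacy_is_torch_available is is_torch_available  # same function object
print(is_torch_available())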
def harmonic_series(n_term: str) -> list:
    """Return the harmonic series 1, 1/2, 1/3, ..., 1/n as a list of strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
| 281 | 1 |
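# A quick sanity check for the fixed function above (run in the same file): the first
# term is rendered as "1" and every later term as the unit fraction "1/k", while an
# empty input yields an empty list.
assert harmonic_series("5") == ["1", "1/2", "1/3", "1/4", "1/5"]
assert harmonic_series("") == []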