| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 to 53.2k chars) | int64 (0 to 721) | string (91 to 41.9k chars) | int64 (0 to 699) | int64 (0 or 1) |

Each row below is rendered as: the `code` sample, `[code_codestyle: N]`, the `style_context` sample, then `[style_context_codestyle: N | label: N]`.
"""Tokenization classes for PhoBERT."""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
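
# Illustrative example (not part of the original module): for the symbol tuple
# ("l", "o", "w", "er</w>"), get_pairs returns
# {("l", "o"), ("o", "w"), ("w", "er</w>")}.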
class PhobertTokenizer(PreTrainedTokenizer):
    """
    Construct a PhoBERT tokenizer, based on Byte-Pair Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
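
    # Illustrative (not in the original source): a single sequence becomes
    # <s> A </s>; a pair becomes <s> A </s></s> B </s>, the RoBERTa-style format.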
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # PhoBERT does not make use of token type ids; a list of zeros is returned.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
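
    # Illustrative sketch (assumed merge table, not from the original file):
    # bpe("hello") might return "h@@ ello" -- the merged subwords are joined
    # with "@@ " and the trailing "</w>" end-of-word marker is stripped.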
    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens (strings) into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file
    def add_from_file(self, f):
        """
        Load a pre-existing dictionary from a text file and add its symbols to this instance.
        """
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
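
    # Illustrative (assumed file contents): a vocab line like "xin_chào 564"
    # maps the token "xin_chào" to the next free id; the trailing count is
    # discarded by the rfind/slice above.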
[code_codestyle: 601]
"""Tests for the ByT5 tokenizer."""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            else:
                # only keep ids that decoded cleanly
                toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
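
    # Illustrative aside (not in the original test): a lone byte such as 0x80
    # is a UTF-8 continuation byte, so bytes([128]).decode("utf-8") raises
    # UnicodeDecodeError -- which is exactly why the override above is needed.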
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary file
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
[style_context_codestyle: 601 | label: 1]
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional

from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate the serving server from the provided command line arguments.
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """Expose model information."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers-cli.
        """
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task",
            type=str,
            choices=get_supported_tasks(),
            help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """
        Tokenize the provided input and eventually return the corresponding token ids.
        """
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """
        Detokenize the provided token ids back into a readable string.
        """
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        """
        Run the pipeline on the provided inputs.
        """
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
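
# Illustrative usage (a sketch based on the flags registered above; the model
# name is only an example):
#   transformers-cli serve --task fill-mask --model bert-base-uncased --port 8888
# A tokenize request could then look like (JSON body fields match the embedded
# Body(...) parameters of `tokenize`):
#   curl -X POST http://localhost:8888/tokenize \
#        -H "Content-Type: application/json" \
#        -d '{"text_input": "Hello world", "return_ids": true}'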
[code_codestyle: 369]
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SdeVeOutput(BaseOutput):
    """
    Output class for the scheduler's step function.
    """

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """
    The variance exploding stochastic differential equation (SDE) scheduler.
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps, sigma_min=None, sigma_max=None, sampling_eps=None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator=None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
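
# Illustrative predictor-corrector loop (a sketch of how diffusers'
# ScoreSdeVePipeline drives this scheduler; `model`, `sample`, `batch_size`,
# and the `.sample` attribute are assumptions here):
#   scheduler.set_timesteps(num_inference_steps)
#   scheduler.set_sigmas(num_inference_steps)
#   for i, t in enumerate(scheduler.timesteps):
#       sigma_t = scheduler.sigmas[i] * torch.ones(batch_size)
#       for _ in range(scheduler.config.correct_steps):
#           score = model(sample, sigma_t).sample
#           sample = scheduler.step_correct(score, sample).prev_sample
#       score = model(sample, sigma_t).sample
#       out = scheduler.step_pred(score, t, sample)
#       sample, sample_mean = out.prev_sample, out.prev_sample_mean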
[style_context_codestyle: 369 | label: 1]
"""Tests for the LayoutLMv3 image processor."""
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
lowerCAmelCase__ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
[code_codestyle: 48]
"""Project Euler problem 10: sum of all primes below two million."""
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """Check whether `number` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
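
# Illustrative check (not in the original file): is_prime(29) -> True, while
# is_prime(25) -> False because the 6k +/- 1 loop hits the factor at i = 5.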

def prime_generator() -> Iterator[int]:
    """Yield the prime numbers in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below `n`."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")
[style_context_codestyle: 347 | label: 0]
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_lowercase : int = logging.getLogger(__name__)
def lowercase__ ( ):
__UpperCAmelCase = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
'''--dataset_name''' , type=snake_case_ , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=snake_case_ , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=snake_case_ , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=snake_case_ , default=1_000 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=snake_case_ , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=snake_case_ , type=snake_case_ , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=snake_case_ , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=snake_case_ , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
__UpperCAmelCase = parser.parse_args()
return args
def lowercase__ ( snake_case_ :Optional[int] ):
def fn(snake_case_ :Dict ):
return tokenizer(examples['''text'''] )
return fn
def lowercase__ ( snake_case_ :Optional[Any] ):
__UpperCAmelCase = []
for i in range(len(tokenized_data['''input_ids'''] ) ):
__UpperCAmelCase = {
'''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ),
'''attention_mask''': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ),
}
__UpperCAmelCase = tf.train.Features(feature=snake_case_ )
__UpperCAmelCase = tf.train.Example(features=snake_case_ )
__UpperCAmelCase = example.SerializeToString()
records.append(snake_case_ )
return records
def lowercase__ ( snake_case_ :Dict ):
__UpperCAmelCase = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
__UpperCAmelCase = min(len(snake_case_ ) , args.limit )
__UpperCAmelCase = dataset.select(range(snake_case_ ) )
print(F'''Limiting the dataset to {args.limit} entries.''' )
__UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
__UpperCAmelCase = os.path.join(args.output_dir , args.split )
if not os.path.exists(snake_case_ ):
os.makedirs(snake_case_ )
else:
__UpperCAmelCase = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
__UpperCAmelCase = tokenize_function(snake_case_ )
__UpperCAmelCase = dataset.map(snake_case_ , batched=snake_case_ , num_proc=4 , remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(snake_case_ :str ):
# Concatenate all texts.
__UpperCAmelCase = {k: sum(examples[k] , [] ) for k in examples.keys()}
__UpperCAmelCase = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
__UpperCAmelCase = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
__UpperCAmelCase = {
k: [t[i : i + args.max_length] for i in range(0 , snake_case_ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
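
    # Illustrative (toy data, not from the script): with args.max_length = 4,
    #   {"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8]]}
    # is concatenated to one sequence of length 8 and re-split into
    #   {"input_ids": [[1, 2, 3, 4], [5, 6, 7, 8]]}.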
    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
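
# Illustrative invocation (a sketch; the script file name and the GCS bucket
# are assumptions, the flags are the ones defined in parse_args above):
#   python prepare_tfrecord_shards.py --split train --max_length 512 \
#       --shard_size 1000 --output_dir gs://tf-tpu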
[code_codestyle: 397]
"""simple docstring"""
def lowercase__ ( snake_case_ :List[Any] , snake_case_ :str , snake_case_ :Tuple , snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[Any] ):
if index == r:
for j in range(snake_case_ ):
print(data[j] , end=''' ''' )
print(''' ''' )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
__UpperCAmelCase = arr[i]
combination_util(snake_case_ , snake_case_ , snake_case_ , index + 1 , snake_case_ , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :str , snake_case_ :List[str] ):
# A temporary array to store all combination one by one
__UpperCAmelCase = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(snake_case_ , snake_case_ , snake_case_ , 0 , snake_case_ , 0 )
if __name__ == "__main__":
# Driver code to check the function above
_lowercase : List[str] = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
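
# Illustrative expected output (first lines) for the driver above:
#   10 20 30
#   10 20 40
#   10 20 50
#   10 30 40
#   ...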
[style_context_codestyle: 397 | label: 1]
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class lowercase :
"""simple docstring"""
def __init__( self : List[str] , a_ : Any , a_ : Dict=13 , a_ : Dict=7 , a_ : Dict=True , a_ : Tuple=True , a_ : Dict=99 , a_ : Union[str, Any]=32 , a_ : Any=5 , a_ : Union[str, Any]=4 , a_ : Optional[Any]=37 , a_ : Dict="gelu" , a_ : Optional[int]=0.1 , a_ : int=0.1 , a_ : Optional[int]=50 , a_ : str=0.0_2 , a_ : Tuple=True , a_ : str=None , ):
"""simple docstring"""
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = seq_length
lowerCamelCase__ = is_training
lowerCamelCase__ = use_input_mask
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = initializer_range
lowerCamelCase__ = use_labels
lowerCamelCase__ = scope
def _UpperCamelCase ( self : List[str] ):
"""simple docstring"""
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = None
if self.use_input_mask:
lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ = self.get_config()
return config, input_ids, input_mask, token_labels
def _UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = self.prepare_config_and_inputs()
lowerCamelCase__ = True
lowerCamelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _UpperCamelCase ( self : int , a_ : Tuple , a_ : List[Any] , a_ : Optional[Any] , a_ : Union[str, Any] , **a_ : Optional[Any] , ):
"""simple docstring"""
lowerCamelCase__ = BertGenerationEncoder(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
lowerCamelCase__ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        *args,
    ):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
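# Illustrative sketch (assumption: not part of the test file above). The encoder/decoder
# pair exercised here is typically combined into a seq2seq model; the checkpoint name is
# the one used by the slow tests above.
#
#     from transformers import EncoderDecoderModel
#
#     bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained(
#         "google/bert_for_seq_generation_L-24_bbc_encoder",
#         "google/bert_for_seq_generation_L-24_bbc_encoder",
#     )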
| 165
|
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False):
    """Drop paths (stochastic depth) per sample, when applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
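# Usage sketch (illustrative, not in the original file): stochastic depth zeroes whole
# samples and rescales the survivors by 1 / keep_prob, and is a no-op at inference time.
#
#     x = torch.randn(4, 64, 7, 7)
#     y_train = drop_path(x, drop_prob=0.1, training=True)   # ~10% of samples zeroed out
#     y_eval = drop_path(x, drop_prob=0.1, training=False)   # returns x unchanged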
class PoolFormerDropPath(nn.Module):
    """Drop paths (stochastic depth) per sample (when applied in the main path of residual blocks)."""

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states):
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self):
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings with an optional normalization layer."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group Normalization with 1 group. Input: tensor of shape [B, C, H, W]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
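# Note (illustrative): returning `pool(hidden_states) - hidden_states` makes this module a
# pure token mixer. The enclosing PoolFormerLayer adds the residual back, so the block
# computes x + drop_path(pool(norm(x)) - norm(x)) where self-attention would normally sit.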
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)
    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
__lowerCAmelCase = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__lowerCAmelCase = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
'''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , __A , )
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values=None,
        labels=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
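# Inference sketch (illustrative, not part of the original module); the checkpoint name is
# taken from the docstring constants above, and the random tensor stands in for a real
# preprocessed image.
#
#     from transformers import PoolFormerForImageClassification
#     import torch
#
#     model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#     logits = model(pixel_values=torch.randn(1, 3, 224, 224)).logits
#     print(model.config.id2label[int(logits.argmax(-1))])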
| 536
| 0
|
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"
class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend():
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
return name
raise RuntimeError(
'''No hyperparameter search backend available.\n'''
+ '''\n'''.join(
F""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
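# Usage sketch (illustrative; the import path is an assumption, not shown in this module):
#
#     from transformers.hyperparameter_search import default_hp_search_backend
#
#     backend_name = default_hp_search_backend()  # e.g. "optuna" if only Optuna is installed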
| 622
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""uclanlp/visualbert-vqa""": """https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-pre""": """https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vqa-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-vcr""": """https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-pre""": """https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json""",
"""uclanlp/visualbert-vcr-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"""
),
"""uclanlp/visualbert-nlvr2""": """https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-pre""": """https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json""",
"""uclanlp/visualbert-nlvr2-coco-pre""": (
"""https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"""
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
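# Usage sketch (illustrative, not in the original file): usually only the visual embedding
# size needs changing; the remaining defaults above match bert-base-uncased.
#
#     config = VisualBertConfig(visual_embedding_dim=2048)  # e.g. for Faster R-CNN region features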
| 622
| 1
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    # Map every byte value to a printable unicode character so the BPE vocabulary never has
    # to contain raw whitespace or control bytes.
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
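# Illustrative example (not in the original file): for the word tuple ("h", "e", "l", "l", "o"),
# get_pairs returns {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}; the BPE loop below
# repeatedly merges the highest-ranked of these pairs.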
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation"):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
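# Usage sketch (illustrative): unlike BERT-style tokenizers, Blenderbot only appends a single
# </s> rather than wrapping the sequence in <s> ... </s>, as build_inputs_with_special_tokens
# above shows.
#
#     tok = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
#     tok("Hello world")["input_ids"][-1] == tok.eos_token_id  # True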
| 107
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCAmelCase ={"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 333
| 0
|
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
'<unk>',
'<cls>',
'<sep>',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 384
|
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the dataset used to train seq2seq models."""

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)
def __len__( self ) -> List[str]:
'''simple docstring'''
return len(self.documents )
    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
if element.startswith('@highlight' ):
break
story_lines.append(__UpperCamelCase )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))
return story_lines, summary_lines
def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
if line.startswith('@highlight' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def fit_to_block_size(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
return sequence
def build_mask(sequence, pad_token_id):
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def encode_for_summarization(story_lines, summary_lines, tokenizer):
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def compute_token_type_ids(batch, separator_token_id):
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
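# Usage sketch (illustrative, using the function names defined above): turning one CNN/DM
# story into padded, masked model inputs.
#
#     story_lines, summary_lines = process_story(raw_story_text)
#     story_ids, summary_ids = encode_for_summarization(story_lines, summary_lines, tokenizer)
#     story_ids = fit_to_block_size(story_ids, 512, tokenizer.pad_token_id)
#     attention_mask = build_mask(torch.tensor([story_ids]), tokenizer.pad_token_id)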
| 384
| 1
|
"""simple docstring"""
def partition(m):
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
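# How the table is filled (illustrative note, not in the original file): memo[i][0] = 1 seeds
# the table, and each entry accumulates memo[n][k] = memo[n][k - 1] + memo[n - k - 1][k];
# the two branches roughly correspond to "do not use another part of size k" and "use one
# more part of size k". The final answer is read from memo[m][m - 1].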
| 480
|
'''simple docstring'''
from pathlib import Path
import fire
def minify(src_dir, dest_dir, n):
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
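# CLI usage sketch (illustrative): python-fire exposes minify's arguments directly, e.g.
#
#     python minify.py path/to/src path/to/dest 100
#
# which writes the first 100 lines of every file in path/to/src to path/to/dest.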
| 316
| 0
|
"""simple docstring"""
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def consistent_heuristic(P: TPos, goal: TPos):
    # euclidean distance
    a = np.array(P)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(P: TPos, goal: TPos):
    # integer division by the time variable t
    return consistent_heuristic(P, goal) // t


def heuristic_2(P: TPos, goal: TPos):
    # manhattan distance
    return abs(P[0] - goal[0]) + abs(P[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
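# Note (illustrative): key() implements the Multi-Heuristic A* priority f_i(s) = g(s) + W1 * h_i(s);
# the search below only expands from an inadmissible queue i while
# open_list[i].minkey() <= W2 * open_list[0].minkey(), so the anchor (consistent) heuristic
# keeps the overall suboptimality bounded.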
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )
def make_common_ground():
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s)
                        expand_state(
                            get_s,
                            0,
                            visited,
                            g_function,
                            close_list_anchor,
                            close_list_inad,
                            open_list,
                            back_pointer,
                        )
                        close_list_anchor.append(get_s)
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
        for j in range(n):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 366
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))
    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels
# set step values
        self.scheduler.set_timesteps(num_inference_steps)
for t in self.progress_bar(self.scheduler.timesteps ):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)

            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample
# perform guidance
if guidance_scale > 1:
__lowerCamelCase , __lowerCamelCase : str = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = torch.split(UpperCAmelCase , len(UpperCAmelCase ) // 2 , dim=0 )
__lowerCamelCase : List[str] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__lowerCamelCase : Union[str, Any] = torch.cat([half_eps, half_eps] , dim=0 )
__lowerCamelCase : Optional[Any] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__lowerCamelCase , __lowerCamelCase : int = torch.split(UpperCAmelCase , UpperCAmelCase , dim=1 )
else:
__lowerCamelCase : int = noise_pred
# compute previous image: x_t -> x_t-1
__lowerCamelCase : Optional[Any] = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
if guidance_scale > 1:
__lowerCamelCase , __lowerCamelCase : List[str] = latent_model_input.chunk(2 , dim=0 )
else:
__lowerCamelCase : Optional[Any] = latent_model_input
__lowerCamelCase : Tuple = 1 / self.vae.config.scaling_factor * latents
__lowerCamelCase : Any = self.vae.decode(UpperCAmelCase ).sample
__lowerCamelCase : Tuple = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__lowerCamelCase : Optional[int] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCamelCase : Dict = self.numpy_to_pil(UpperCAmelCase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=UpperCAmelCase )
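# --- Usage sketch (added for illustration; not part of the original file). It assumes
# the public `DiTPipeline` checkpoint "facebook/DiT-XL-2-256" on the Hugging Face Hub
# and a CUDA device; adjust names to your environment before relying on it.
#
#   import torch
#   from diffusers import DiTPipeline
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
#   pipe = pipe.to("cuda")
#   class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#   images = pipe(class_labels=class_ids, num_inference_steps=25).images
#   images[0].save("dit_sample.png")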
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 2_56,
}

CONTROL_CODES = {
"""Pregnancy""": 16_86_29,
"""Christianity""": 76_75,
"""Explain""": 10_64_23,
"""Fitness""": 6_34_40,
"""Saving""": 6_31_63,
"""Ask""": 2_71_71,
"""Ass""": 9_59_85,
"""Joke""": 16_35_09,
"""Questions""": 4_56_22,
"""Thoughts""": 4_96_05,
"""Retail""": 5_23_42,
"""Feminism""": 16_43_38,
"""Writing""": 1_19_92,
"""Atheism""": 19_22_63,
"""Netflix""": 4_86_16,
"""Computing""": 3_96_39,
"""Opinion""": 4_32_13,
"""Alone""": 4_49_67,
"""Funny""": 5_89_17,
"""Gaming""": 4_03_58,
"""Human""": 40_88,
"""India""": 13_31,
"""Joker""": 7_71_38,
"""Diet""": 3_62_06,
"""Legal""": 1_18_59,
"""Norman""": 49_39,
"""Tip""": 7_26_89,
"""Weight""": 5_23_43,
"""Movies""": 4_62_73,
"""Running""": 2_34_25,
"""Science""": 20_90,
"""Horror""": 3_77_93,
"""Confession""": 6_05_72,
"""Finance""": 1_22_50,
"""Politics""": 1_63_60,
"""Scary""": 19_19_85,
"""Support""": 1_26_54,
"""Technologies""": 3_25_16,
"""Teenage""": 6_61_60,
"""Event""": 3_27_69,
"""Learned""": 6_74_60,
"""Notion""": 18_27_70,
"""Wikipedia""": 3_75_83,
"""Books""": 66_65,
"""Extract""": 7_60_50,
"""Confessions""": 10_27_01,
"""Conspiracy""": 7_59_32,
"""Links""": 6_36_74,
"""Narcissus""": 15_04_25,
"""Relationship""": 5_47_66,
"""Relationships""": 13_47_96,
"""Reviews""": 4_16_71,
"""News""": 42_56,
"""Translation""": 2_68_20,
"""multilingual""": 12_84_06,
}
def get_pairs(word):
    """Return the set of symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
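# Added illustration (hypothetical input): for the symbol tuple ("l", "o", "w", "</w>"),
#   sorted(get_pairs(("l", "o", "w", "</w>")))
# gives [("l", "o"), ("o", "w"), ("w", "</w>")] -- the candidate bigrams that
# CTRLTokenizer.bpe below ranks against `bpe_ranks` and merges greedily.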
class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES
    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # pick the lowest-ranked (most frequent) bigram that is still mergeable
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an id (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
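# --- Usage sketch (added; assumes the public "ctrl" checkpoint is reachable) ---
#   from transformers import CTRLTokenizer
#
#   tokenizer = CTRLTokenizer.from_pretrained("ctrl")
#   tokens = tokenizer.tokenize("Links Hello world")   # control code "Links" + BPE pieces
#   tokenizer.convert_tokens_to_string(tokens)         # strips the "@@ " continuation markers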
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
__snake_case = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
__snake_case = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
'''simple docstring'''
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with"
                f" MaskFormer. Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)
@classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Instantiate a MaskFormerConfig from a pre-trained backbone and decoder configuration."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
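# --- Usage sketch (added for illustration; assumes the public `transformers` API) ---
#   from transformers import MaskFormerConfig, SwinConfig, DetrConfig
#
#   config = MaskFormerConfig()  # Swin backbone + DETR decoder defaults
#   custom = MaskFormerConfig.from_backbone_and_decoder_configs(SwinConfig(), DetrConfig())
#   assert custom.to_dict()["model_type"] == "maskformer"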
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
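# Added usage sketch: pass exactly one quantity as 0 and read back the solved value
# (Ohm's law, V = I * R).
if __name__ == "__main__":
    print(ohms_law(voltage=10, current=2, resistance=0))  # {'resistance': 5.0}
    print(ohms_law(voltage=0, current=2, resistance=5))   # {'voltage': 10.0}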
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
"""CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvNextForImageClassification""",
"""ConvNextModel""",
"""ConvNextPreTrainedModel""",
"""ConvNextBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
"""TFConvNextForImageClassification""",
"""TFConvNextModel""",
"""TFConvNextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
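# Added sanity-check sketch (illustrative): the cosine schedule should yield one beta
# per diffusion timestep, each clipped to max_beta.
#   betas = betas_for_alpha_bar(1000)
#   assert betas.shape == (1000,) and float(betas.max()) <= 0.999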
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 10_00,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
"""simple docstring"""
if trained_betas is not None:
lowerCamelCase_: Dict = torch.tensor(A_ , dtype=torch.floataa )
elif beta_schedule == "linear":
lowerCamelCase_: Tuple = torch.linspace(A_ , A_ , A_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowerCamelCase_: List[Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , A_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowerCamelCase_: Tuple = betas_for_alpha_bar(A_ )
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" )
lowerCamelCase_: str = 1.0 - self.betas
lowerCamelCase_: Any = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(A_ , A_ , A_ )
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self) -> bool:
        return self.sample is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self):
        return self.config.num_train_timesteps
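# --- Usage sketch (added; mirrors how diffusers pipelines typically drive a scheduler,
# with `noise_pred = ...` standing in for your denoising model) ---
#   scheduler = KDPM2DiscreteScheduler()
#   scheduler.set_timesteps(num_inference_steps=25, device="cpu")
#   sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = ...  # run the model on (model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample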
"""simple docstring"""
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Min-heap with decrease-key support, backed by an element -> index map."""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        # sift down every internal node, from the last parent up to the root
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("""Min Heap - before decrease key""")
for i in my_min_heap.heap:
print(i)
print("""Min Heap - After decrease key of node [B -> -17]""")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
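# Added check (illustrative): after decrease_key, the heap invariant must still hold,
# i.e. every parent compares <= its children.
if __name__ == "__main__":
    for idx in range(1, len(my_min_heap.heap)):
        parent = my_min_heap.heap[(idx - 1) // 2]
        assert parent.val <= my_min_heap.heap[idx].val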
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass
class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
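# --- Subclassing sketch (added; `MyReader` and its internals are hypothetical) ---
# Concrete readers in `datasets` (csv, json, parquet, ...) implement `read()` on top of
# the attributes stored above, roughly:
#   class MyReader(AbstractDatasetReader):
#       def read(self):
#           builder = ...  # configure a DatasetBuilder from self.path_or_paths / self.features
#           return builder.as_dataset(split=self.split)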
import copy
import os
import cv2 as cva  # the rest of the file consistently uses the `cva` alias for OpenCV
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch(self, input_image):
        self.img = cva.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        # histogram of grey levels; x holds the per-level counts
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        # remap every pixel through the look-up table built above
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cva.imshow("Output-Image", self.img)
        cva.imshow("Input-Image", self.original_image)
        cva.waitKey(5000)
        cva.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
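# Added note: the look-up table built in `stretch` is the classic histogram-equalization
# transfer function s_k = (L - 1) * CDF(r_k), the scaled cumulative distribution of the
# input grey levels. A compact numpy equivalent (illustrative):
#   hist, _ = np.histogram(img.ravel(), 256, [0, 256])
#   cdf = hist.cumsum() / hist.sum()
#   equalized = ((256 - 1) * cdf[img]).astype(np.uint8)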
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
'''simple docstring'''
__lowerCamelCase = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' )
__lowerCamelCase = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 38_015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 25_506, 'token_str': ' accuser'},
] , )
__lowerCamelCase = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1e-05,
'token': 38_015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1e-05,
'token': 25_506,
'token_str': ' accuser',
},
] , )
__lowerCamelCase = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13_606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 3_499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 2_941, 'token_str': ' Te'},
] , )
@require_torch
    def test_small_model_pt(self):
'''simple docstring'''
__lowerCamelCase = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' )
__lowerCamelCase = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 35_676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 16_416, 'token_str': 'ELS'},
] , )
__lowerCamelCase = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2e-05,
'token': 35_676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 16_416, 'token_str': 'ELS'},
] , )
__lowerCamelCase = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 3_499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2e-05, 'token': 2_941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13_606, 'token_str': ' Clara'},
] , )
__lowerCamelCase = unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=6 ) , [
[
{
'score': 2.2e-05,
'token': 35_676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2e-05, 'token': 16_416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2e-05,
'token': 35_676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2e-05, 'token': 16_416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
'''simple docstring'''
__lowerCamelCase = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' )
# convert model to fp16
pipe.model.half()
__lowerCamelCase = pipe('Paris is the [MASK] of France.' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
@require_torch
    def test_large_model_pt(self):
'''simple docstring'''
__lowerCamelCase = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' )
self.run_large_test(lowerCamelCase__ )
@slow
@require_tf
    def test_large_model_tf(self):
'''simple docstring'''
__lowerCamelCase = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' )
self.run_large_test(lowerCamelCase__ )
    def run_large_test(self, unmasker):
'''simple docstring'''
__lowerCamelCase = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) , [
{'sequence': 'My name is John', 'score': 0.0_08, 'token': 610, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.0_07, 'token': 1_573, 'token_str': ' Chris'},
] , )
__lowerCamelCase = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.2_51,
'token': 2_201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.2_14,
'token': 12_790,
'token_str': ' Lyon',
},
] , )
__lowerCamelCase = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(lowerCamelCase__ ) , [
{'sequence': 'My name is Patrick', 'score': 0.0_05, 'token': 3_499, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.0_00, 'token': 13_606, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.0_00, 'token': 2_941, 'token_str': ' Te'},
] , )
@require_torch
    def test_model_no_pad_pt(self):
'''simple docstring'''
__lowerCamelCase = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' )
__lowerCamelCase = None
__lowerCamelCase = None
self.run_pipeline_test(lowerCamelCase__ , [] )
@require_tf
    def test_model_no_pad_tf(self):
'''simple docstring'''
__lowerCamelCase = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' )
__lowerCamelCase = None
__lowerCamelCase = None
self.run_pipeline_test(lowerCamelCase__ , [] )
    def get_test_pipeline(self, model, tokenizer, processor):
'''simple docstring'''
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' )
__lowerCamelCase = FillMaskPipeline(model=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
__lowerCamelCase = [
f"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
'''simple docstring'''
__lowerCamelCase = fill_masker.tokenizer
__lowerCamelCase = fill_masker.model
__lowerCamelCase = fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
lowerCamelCase__ , [
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
] , )
__lowerCamelCase = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
lowerCamelCase__ , [
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
] , )
__lowerCamelCase = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
lowerCamelCase__ , [
[
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
],
[
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
],
] , )
with self.assertRaises(lowerCamelCase__ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(lowerCamelCase__ ):
fill_masker('This is' )
self.run_test_top_k(lowerCamelCase__ , lowerCamelCase__ )
self.run_test_targets(lowerCamelCase__ , lowerCamelCase__ )
self.run_test_top_k_targets(lowerCamelCase__ , lowerCamelCase__ )
self.fill_mask_with_duplicate_targets_and_top_k(lowerCamelCase__ , lowerCamelCase__ )
self.fill_mask_with_multiple_masks(lowerCamelCase__ , lowerCamelCase__ )
    def run_test_targets(self, model, tokenizer):
'''simple docstring'''
__lowerCamelCase = tokenizer.get_vocab()
__lowerCamelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
__lowerCamelCase = FillMaskPipeline(model=lowerCamelCase__ , tokenizer=lowerCamelCase__ , targets=lowerCamelCase__ )
__lowerCamelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
lowerCamelCase__ , [
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
] , )
__lowerCamelCase = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , lowerCamelCase__ )
__lowerCamelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(lowerCamelCase__ ) )
# Call argument
__lowerCamelCase = FillMaskPipeline(model=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
__lowerCamelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=lowerCamelCase__ )
self.assertEqual(
lowerCamelCase__ , [
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
] , )
__lowerCamelCase = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , lowerCamelCase__ )
__lowerCamelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(lowerCamelCase__ ) )
# Score equivalence
__lowerCamelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=lowerCamelCase__ )
__lowerCamelCase = [top_mask['token_str'] for top_mask in outputs]
__lowerCamelCase = [top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowerCamelCase__ ) == set(lowerCamelCase__ ):
__lowerCamelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=lowerCamelCase__ )
__lowerCamelCase = [top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowerCamelCase__ ) , nested_simplify(lowerCamelCase__ ) )
# Raises with invalid
with self.assertRaises(lowerCamelCase__ ):
__lowerCamelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowerCamelCase__ ):
__lowerCamelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[''] )
with self.assertRaises(lowerCamelCase__ ):
__lowerCamelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets='' )
    def run_test_top_k(self, model, tokenizer):
'''simple docstring'''
__lowerCamelCase = FillMaskPipeline(model=lowerCamelCase__ , tokenizer=lowerCamelCase__ , top_k=2 )
__lowerCamelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
lowerCamelCase__ , [
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
] , )
__lowerCamelCase = FillMaskPipeline(model=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
__lowerCamelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
lowerCamelCase__ , [
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
] , )
self.assertEqual(nested_simplify(lowerCamelCase__ ) , nested_simplify(lowerCamelCase__ ) )
    def run_test_top_k_targets(self, model, tokenizer):
'''simple docstring'''
__lowerCamelCase = tokenizer.get_vocab()
__lowerCamelCase = FillMaskPipeline(model=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
# top_k=2, ntargets=3
__lowerCamelCase = sorted(vocab.keys() )[:3]
__lowerCamelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=lowerCamelCase__ )
# If we use the most probably targets, and filter differently, we should still
# have the same results
        __lowerCamelCase = [el['token_str'] for el in sorted(lowerCamelCase__ , key=lambda x: x["score"] , reverse=True )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowerCamelCase__ ).issubset(lowerCamelCase__ ):
__lowerCamelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=lowerCamelCase__ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowerCamelCase__ ) , nested_simplify(lowerCamelCase__ ) )
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
'''simple docstring'''
__lowerCamelCase = FillMaskPipeline(model=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
__lowerCamelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
__lowerCamelCase = sorted(vocab.keys() )[:3]
__lowerCamelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
__lowerCamelCase = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=lowerCamelCase__ , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(lowerCamelCase__ ) , 3 )
    def fill_mask_with_multiple_masks(self, model, tokenizer):
'''simple docstring'''
__lowerCamelCase = FillMaskPipeline(model=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
__lowerCamelCase = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
lowerCamelCase__ , [
[
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
],
[
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
],
[
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
{'sequence': ANY(lowerCamelCase__ ), 'score': ANY(lowerCamelCase__ ), 'token': ANY(lowerCamelCase__ ), 'token_str': ANY(lowerCamelCase__ )},
],
] , )
| 469
| 0
|
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
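
# The function below reconstructs three artifacts from a single original XLM dump: the
# model state dict (with the base model nested one level deeper than the XLM repo), a
# JSON-serializable config, and the word->id vocabulary, saved under the standard
# `transformers` file names imported above.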
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 261
|
"""simple docstring"""
def __a ( A = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
A__ = set()
# Replace all the whitespace in our sentence
A__ = input_str.replace(" " , "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(A ) == 26
def __a ( A = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
A__ = [False] * 26
for char in input_str:
if char.islower():
A__ = True
elif char.isupper():
A__ = True
return all(A )
def __a ( A = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
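
# Example: all three variants agree on the same inputs, e.g.
# is_pangram_fastest("Waltz, bad nymph, for quick jigs vex.") -> True
# is_pangram_fastest("Hello world") -> False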
def benchmark() -> None:
    """Benchmark code comparing the different versions."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 261
| 1
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, specifically avoiding whitespace/control
    characters that the BPE code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
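
# Why this table exists: byte-level BPE operates on raw bytes, but whitespace and control
# bytes are awkward to store in a vocab file, so every byte is mapped to a printable
# unicode character (and inverted again via `byte_decoder` in the tokenizer below).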
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    """
    Constructs a Longformer tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 13
|
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
    """Dataset version in MAJOR.MINOR.PATCH format."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    return ".".join(str(v) for v in version_tuple)
| 13
| 1
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
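
# The attention helper below follows the same scheme: each (new_name, old_name) pair maps
# one tensor of stage `idx`, block `cnt` from the original checkpoint into the HF model.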
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def cls_token(idx):
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token
def final():
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # (reconstructed) resize the processor to the requested input image size
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 716
|
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """
    Sigmoid activation: 1 / (1 + exp(-x)), applied elementwise.

    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.array) -> np.array:
    """SiLU / approximate-GELU activation: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)
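
# Quick check (illustrative): sigmoid(np.array([0.0])) returns array([0.5]), and
# sigmoid_linear_unit(x) closely approximates the exact GELU for moderate x.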
if __name__ == "__main__":
import doctest
doctest.testmod()
| 216
| 0
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
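
# The fast tests below assemble a miniature UNet + VQ-VAE so the full LDM pipeline can run
# on CPU in seconds; the slow test at the bottom loads the real CompVis/ldm-celebahq-256
# weights instead.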
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 87
|
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
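
# Each reader test below round-trips the same 4-row fixture (`parquet_path`) through
# ParquetDatasetReader, varying one knob at a time: memory mapping, feature casts,
# split names, and path types.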
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
| 668
| 0
|
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "

HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def convert_command_factory(args: Namespace):
    """Factory used by the argument parser to build the convert command from parsed args."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 118
|
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 118
| 1
|
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
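
# Checkpoints are fetched lazily from PREFIX + path. Note that the 1b and 5b variants
# share the VQ-VAE and the two upsampler priors and differ only in the top-level
# (level_2) prior.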
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """
    Copy/paste/tweak model's weights to our design.
    """
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                # reconstructed: the conditioner keys are re-rooted here; the exact
                # replacement string in the original script could not be recovered.
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""jukebox-5b-lyrics""",
type=str,
help="""Name of the model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""jukebox-5b-lyrics-converted""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
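
# Illustrative invocation (not part of the original script; assumes the file is
# saved as convert_jukebox.py):
#
#   python convert_jukebox.py --model_name jukebox-5b-lyrics \
#       --pytorch_dump_folder_path jukebox-5b-lyrics-converted
#
# The script downloads the original OpenAI checkpoints if needed, remaps their
# keys with fix_jukebox_keys, loads them into the Hugging Face JukeboxModel,
# and writes the converted weights plus a mapping.json audit trail.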
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index: int, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the result"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
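
# Minimal usage sketch (illustrative, not part of the original module); assumes
# a terminal that supports the cursor helpers imported above:
#
#   menu = BulletMenu("Which compute environment are you using?", ["This machine", "AWS (Amazon SageMaker)"])
#   choice = menu.run(default_choice=0)  # blocks until the user selects an entry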
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Sum of the fifth powers of the decimal digits of `number`."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Sum of all numbers that equal the sum of the fifth powers of their digits."""
    return sum(
        number
        for number in range(1000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )
if __name__ == "__main__":
print(solution())
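
# Worked check (illustrative): 4150 is counted by solution(), since
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.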
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["politics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 100 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
            ''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=True , )
self.assertEqual(
            nested_simplify(outputs) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
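
# Minimal usage sketch of the pipeline under test (illustrative, mirrors the
# slow tests above):
#
#   classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
#   classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
#
# The pipeline scores each candidate label with an NLI entailment model and
# returns the labels sorted by score.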
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=True , use_timestep_embedding=False , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
        scheduler = IPNDMScheduler()

        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''batch_size''': 1,
            '''generator''': generator,
            '''num_inference_steps''': 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0 )
        output = pipe(generator=generator , num_inference_steps=100 , audio_length_in_s=4.096 )
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.float16 )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0 )
        output = pipe(generator=generator , num_inference_steps=100 , audio_length_in_s=4.096 )
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
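
# Minimal usage sketch (illustrative; the checkpoint name is taken from the
# slow tests above):
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   audio = pipe(num_inference_steps=100, audio_length_in_s=4.096).audios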
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name: str) -> ASTConfig:
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError('Model not supported')

    repo_id = 'huggingface/label-files'
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = 'speech-commands-v2-id2label.json'
    else:
        config.num_labels = 527
        filename = 'audioset-id2label.json'

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name: str) -> str:
    if "module.v" in name:
        name = name.replace('module.v', 'audio_spectrogram_transformer')
    if "cls_token" in name:
        name = name.replace('cls_token', 'embeddings.cls_token')
    if "dist_token" in name:
        name = name.replace('dist_token', 'embeddings.distillation_token')
    if "pos_embed" in name:
        name = name.replace('pos_embed', 'embeddings.position_embeddings')
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    # transformer blocks
    if "blocks" in name:
        name = name.replace('blocks', 'encoder.layer')
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace('audio_spectrogram_transformer.norm', 'audio_spectrogram_transformer.layernorm')
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace('module.mlp_head.0', 'classifier.layernorm')
    if "module.mlp_head.1" in name:
        name = name.replace('module.mlp_head.1', 'classifier.dense')
    return name
def convert_state_dict(orig_state_dict: dict, config: ASTConfig) -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[3])
            dim = config.hidden_size
            # the fused qkv projection is split into separate query/key/value tensors
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict: dict) -> None:
    ignore_keys = [
        '''module.v.head.weight''',
        '''module.v.head.bias''',
        '''module.v.head_dist.weight''',
        '''module.v.head_dist.bias''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original AST weights into our Audio Spectrogram Transformer structure."""
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if '''speech-commands''' not in model_name else -6.845978
    std = 4.5689974 if '''speech-commands''' not in model_name else 5.5654526
    max_length = 1024 if '''speech-commands''' not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset('speech_commands', 'v0.02', split='validation')
        waveform = dataset[0]['''audio''']['''array''']
    else:
        filepath = hf_hub_download(
            repo_id='nielsr/audio-spectogram-transformer-checkpoint', filename='sample_audio.flac', repo_type='dataset', )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors='pt')

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError('Unknown model name')
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError('Logits don\'t match')
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving feature extractor to {pytorch_dump_folder_path}""")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print('Pushing model and feature extractor to the hub...')
        model.push_to_hub(f"""MIT/{model_name}""")
        feature_extractor.push_to_hub(f"""MIT/{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
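
# Illustrative invocation (not part of the original script; assumes the file is
# saved as convert_audio_spectrogram_transformer_original_to_pytorch.py):
#
#   python convert_audio_spectrogram_transformer_original_to_pytorch.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted --push_to_hub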
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/pix2struct-textcaps-base''': (
        '''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = '''pix2struct_text_model'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''hidden_size''',
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self , vocab_size=50244 , hidden_size=768 , d_kv=64 , d_ff=2048 , num_layers=12 , num_heads=12 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , dense_act_fn="gelu_new" , decoder_start_token_id=0 , use_cache=False , pad_token_id=0 , eos_token_id=1 , tie_word_embeddings=False , is_decoder=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , tie_word_embeddings=tie_word_embeddings , is_decoder=is_decoder , **kwargs , )
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type') == "pix2struct":
            config_dict = config_dict['text_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = '''pix2struct_vision_model'''

    def __init__( self , hidden_size=768 , patch_embed_hidden_size=768 , d_ff=2048 , d_kv=64 , num_hidden_layers=12 , num_attention_heads=12 , dense_act_fn="gelu_new" , layer_norm_eps=1E-6 , dropout_rate=0.0 , attention_dropout=0.0 , initializer_range=1E-10 , initializer_factor=1.0 , seq_len=4096 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , **kwargs , ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get('model_type') == "pix2struct":
            config_dict = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = '''pix2struct'''
    is_composition = True

    def __init__( self , text_config=None , vision_config=None , initializer_factor=1.0 , initializer_range=0.02 , is_vqa=False , tie_word_embeddings=False , is_encoder_decoder=True , **kwargs , ):
        super().__init__(tie_word_embeddings=tie_word_embeddings , is_encoder_decoder=is_encoder_decoder , **kwargs)

        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.')

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.')

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
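
# Minimal usage sketch (illustrative, not part of the original module):
#
#   config = Pix2StructConfig()  # text and vision sub-configs fall back to defaults
#   config.text_config.num_layers, config.vision_config.num_hidden_layers  # -> (12, 12)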
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'''configuration_vit_msn''': ['''VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMSNConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_msn'''] = [
        '''VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''ViTMSNModel''',
        '''ViTMSNForImageClassification''',
        '''ViTMSNPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns the prime numbers below max_number, via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # mark all multiples of i, starting at i**2, as composite
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Counts composites below max_number with exactly two (not necessarily distinct) prime factors."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        # every prime from index left through right pairs with prime_numbers[left]
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
if __name__ == "__main__":
print(F'{solution() = }')
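
# Worked check (illustrative): solution(30) == 10, because the semiprimes below
# 30 are 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26.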
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
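
    # Illustrative cross-check (my addition, not in the original): with a shared
    # universe, fuzzy_or/fuzzy_and reduce to elementwise max/min, so the results
    # above can be verified directly against NumPy:
    #
    #   assert np.allclose(union, np.maximum(young, middle_aged))
    #   assert np.allclose(intersection, np.minimum(young, middle_aged))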
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append('''Text input''')
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''')) / '''000000039769.png''').resize((512, 512)))
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(F"""Invalid type requested: {input_type}""")

    return inputs
def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append('''text''')
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append('''image''')
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append('''audio''')
        else:
            raise ValueError(F"""Invalid output: {output}""")

    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, '''inputs'''))
        self.assertTrue(hasattr(self.tool, '''outputs'''))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, '''description'''))
        self.assertTrue(hasattr(self.tool, '''default_checkpoint'''))
        self.assertTrue(self.tool.description.startswith('''This is a tool that'''))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''', id='''sequence'''),
                    '''references''': datasets.Value('''string''', id='''sequence'''),
                }), codebase_urls=['''https://github.com/jitsi/jiwer/'''], reference_urls=[
                '''https://en.wikipedia.org/wiki/Word_error_rate''',
            ], )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
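
# Worked example (illustrative) for the second pair in the docstring above:
# reference "there is another one" vs. prediction "there is an other sample".
# One plausible word alignment gives 2 substitutions, 1 insertion, 0 deletions
# and 2 hits; together with the 1 substitution / 3 hits of the first pair this
# yields WER = (1 + 3) / (4 + 4) = 0.5, matching the documented output.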
"""simple docstring"""
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    """Interest accrued between two payments at a flat daily rate."""
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: float) -> float:
    """Interest accrued when the rate compounds each period."""
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(principal: float, nominal_annual_percentage_rate: float, number_of_years: float) -> float:
    """Interest accrued at an annual percentage rate, compounded daily."""
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
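
# Worked example (illustrative): simple_interest(1000, 0.0005, 30) returns
# 1000 * 0.0005 * 30 = 15.0, i.e. 15 currency units of interest accrue over
# 30 days at a 0.05% daily rate.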
"""simple docstring"""
class lowerCAmelCase :
'''simple docstring'''
def __init__( self :Optional[Any] , lowerCamelCase_ :list ) -> None:
"""simple docstring"""
UpperCamelCase__ = set_counts
UpperCamelCase__ = max(lowerCamelCase_ )
UpperCamelCase__ = len(lowerCamelCase_ )
UpperCamelCase__ = [1] * num_sets
UpperCamelCase__ = list(range(lowerCamelCase_ ) )
def lowerCamelCase__ ( self :str , lowerCamelCase_ :int , lowerCamelCase_ :int ) -> bool:
"""simple docstring"""
UpperCamelCase__ = self.get_parent(lowerCamelCase_ )
UpperCamelCase__ = self.get_parent(lowerCamelCase_ )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
UpperCamelCase__ = 0
UpperCamelCase__ = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
UpperCamelCase__ = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
UpperCamelCase__ = 0
UpperCamelCase__ = src_parent
UpperCamelCase__ = self.set_counts[src_parent]
UpperCamelCase__ = max(self.max_set , lowerCamelCase_ )
return True
def lowerCamelCase__ ( self :int , lowerCamelCase_ :int ) -> int:
"""simple docstring"""
if self.parents[disj_set] == disj_set:
return disj_set
UpperCamelCase__ = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
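
# Minimal usage sketch (illustrative, not part of the original module):
#
#   ds = DisjointSet([1, 2, 3])  # three disjoint sets of sizes 1, 2 and 3
#   ds.merge(0, 1)               # returns True; the merged set has size 3
#   ds.max_set                   # -> 3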
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = '''CompVis/stable-diffusion-v1-1'''
pipe2_model_id = '''CompVis/stable-diffusion-v1-2'''
pipe3_model_id = '''CompVis/stable-diffusion-v1-3'''
pipe4_model_id = '''CompVis/stable-diffusion-v1-4'''
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """
    Pipeline for comparing the outputs of the first four Stable Diffusion checkpoints on the same prompt.
    """

    def __init__( self ,vae: AutoencoderKL ,text_encoder: CLIPTextModel ,tokenizer: CLIPTokenizer ,unet: UNet2DConditionModel ,scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] ,safety_checker: StableDiffusionSafetyChecker ,feature_extractor: CLIPImageProcessor ,requires_safety_checker: bool = True ,):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae ,text_encoder=text_encoder ,tokenizer=tokenizer ,unet=unet ,scheduler=scheduler ,safety_checker=safety_checker ,feature_extractor=feature_extractor ,requires_safety_checker=requires_safety_checker ,)

        self.register_modules(pipeline1=self.pipe1 ,pipeline2=self.pipe2 ,pipeline3=self.pipe3 ,pipeline4=self.pipe4)
    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self ,k) for k in self.config.keys() if not k.startswith('''_''')}

    def enable_attention_slicing(self ,slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def text2img_sd1_1( self ,prompt: Union[str, List[str]] ,height: int = 512 ,width: int = 512 ,num_inference_steps: int = 50 ,guidance_scale: float = 7.5 ,negative_prompt: Optional[Union[str, List[str]]] = None ,num_images_per_prompt: Optional[int] = 1 ,eta: float = 0.0 ,generator: Optional[torch.Generator] = None ,latents: Optional[torch.FloatTensor] = None ,output_type: Optional[str] = "pil" ,return_dict: bool = True ,callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,callback_steps: int = 1 ,**kwargs ,):
        return self.pipe1(
            prompt=prompt ,height=height ,width=width ,num_inference_steps=num_inference_steps ,guidance_scale=guidance_scale ,negative_prompt=negative_prompt ,num_images_per_prompt=num_images_per_prompt ,eta=eta ,generator=generator ,latents=latents ,output_type=output_type ,return_dict=return_dict ,callback=callback ,callback_steps=callback_steps ,**kwargs ,)
@torch.no_grad()
def __lowerCAmelCase ( self : Tuple ,lowercase_ : Union[str, List[str]] ,lowercase_ : int = 5_1_2 ,lowercase_ : int = 5_1_2 ,lowercase_ : int = 5_0 ,lowercase_ : float = 7.5 ,lowercase_ : Optional[Union[str, List[str]]] = None ,lowercase_ : Optional[int] = 1 ,lowercase_ : float = 0.0 ,lowercase_ : Optional[torch.Generator] = None ,lowercase_ : Optional[torch.FloatTensor] = None ,lowercase_ : Optional[str] = "pil" ,lowercase_ : bool = True ,lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,lowercase_ : int = 1 ,**lowercase_ : Dict ,):
return self.pipea(
prompt=lowercase_ ,height=lowercase_ ,width=lowercase_ ,num_inference_steps=lowercase_ ,guidance_scale=lowercase_ ,negative_prompt=lowercase_ ,num_images_per_prompt=lowercase_ ,eta=lowercase_ ,generator=lowercase_ ,latents=lowercase_ ,output_type=lowercase_ ,return_dict=lowercase_ ,callback=lowercase_ ,callback_steps=lowercase_ ,**lowercase_ ,)
@torch.no_grad()
def __lowerCAmelCase ( self : List[Any] ,lowercase_ : Union[str, List[str]] ,lowercase_ : int = 5_1_2 ,lowercase_ : int = 5_1_2 ,lowercase_ : int = 5_0 ,lowercase_ : float = 7.5 ,lowercase_ : Optional[Union[str, List[str]]] = None ,lowercase_ : Optional[int] = 1 ,lowercase_ : float = 0.0 ,lowercase_ : Optional[torch.Generator] = None ,lowercase_ : Optional[torch.FloatTensor] = None ,lowercase_ : Optional[str] = "pil" ,lowercase_ : bool = True ,lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,lowercase_ : int = 1 ,**lowercase_ : Dict ,):
return self.pipea(
prompt=lowercase_ ,height=lowercase_ ,width=lowercase_ ,num_inference_steps=lowercase_ ,guidance_scale=lowercase_ ,negative_prompt=lowercase_ ,num_images_per_prompt=lowercase_ ,eta=lowercase_ ,generator=lowercase_ ,latents=lowercase_ ,output_type=lowercase_ ,return_dict=lowercase_ ,callback=lowercase_ ,callback_steps=lowercase_ ,**lowercase_ ,)
@torch.no_grad()
def __lowerCAmelCase ( self : List[Any] ,lowercase_ : Union[str, List[str]] ,lowercase_ : int = 5_1_2 ,lowercase_ : int = 5_1_2 ,lowercase_ : int = 5_0 ,lowercase_ : float = 7.5 ,lowercase_ : Optional[Union[str, List[str]]] = None ,lowercase_ : Optional[int] = 1 ,lowercase_ : float = 0.0 ,lowercase_ : Optional[torch.Generator] = None ,lowercase_ : Optional[torch.FloatTensor] = None ,lowercase_ : Optional[str] = "pil" ,lowercase_ : bool = True ,lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,lowercase_ : int = 1 ,**lowercase_ : Optional[int] ,):
return self.pipea(
prompt=lowercase_ ,height=lowercase_ ,width=lowercase_ ,num_inference_steps=lowercase_ ,guidance_scale=lowercase_ ,negative_prompt=lowercase_ ,num_images_per_prompt=lowercase_ ,eta=lowercase_ ,generator=lowercase_ ,latents=lowercase_ ,output_type=lowercase_ ,return_dict=lowercase_ ,callback=lowercase_ ,callback_steps=lowercase_ ,**lowercase_ ,)
@torch.no_grad()
def __lowerCAmelCase ( self : str ,lowercase_ : Union[str, List[str]] ,lowercase_ : int = 5_1_2 ,lowercase_ : int = 5_1_2 ,lowercase_ : int = 5_0 ,lowercase_ : float = 7.5 ,lowercase_ : Optional[Union[str, List[str]]] = None ,lowercase_ : Optional[int] = 1 ,lowercase_ : float = 0.0 ,lowercase_ : Optional[torch.Generator] = None ,lowercase_ : Optional[torch.FloatTensor] = None ,lowercase_ : Optional[str] = "pil" ,lowercase_ : bool = True ,lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,lowercase_ : int = 1 ,**lowercase_ : Dict ,):
lowerCAmelCase__ : List[str] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(lowercase_ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'`height` and `width` must be divisible by 8 but are {height} and {width}.' )
# Get first result from Stable Diffusion Checkpoint v1.1
lowerCAmelCase__ : Any = self.textaimg_sda_a(
prompt=lowercase_ ,height=lowercase_ ,width=lowercase_ ,num_inference_steps=lowercase_ ,guidance_scale=lowercase_ ,negative_prompt=lowercase_ ,num_images_per_prompt=lowercase_ ,eta=lowercase_ ,generator=lowercase_ ,latents=lowercase_ ,output_type=lowercase_ ,return_dict=lowercase_ ,callback=lowercase_ ,callback_steps=lowercase_ ,**lowercase_ ,)
# Get first result from Stable Diffusion Checkpoint v1.2
lowerCAmelCase__ : Any = self.textaimg_sda_a(
prompt=lowercase_ ,height=lowercase_ ,width=lowercase_ ,num_inference_steps=lowercase_ ,guidance_scale=lowercase_ ,negative_prompt=lowercase_ ,num_images_per_prompt=lowercase_ ,eta=lowercase_ ,generator=lowercase_ ,latents=lowercase_ ,output_type=lowercase_ ,return_dict=lowercase_ ,callback=lowercase_ ,callback_steps=lowercase_ ,**lowercase_ ,)
# Get first result from Stable Diffusion Checkpoint v1.3
lowerCAmelCase__ : List[Any] = self.textaimg_sda_a(
prompt=lowercase_ ,height=lowercase_ ,width=lowercase_ ,num_inference_steps=lowercase_ ,guidance_scale=lowercase_ ,negative_prompt=lowercase_ ,num_images_per_prompt=lowercase_ ,eta=lowercase_ ,generator=lowercase_ ,latents=lowercase_ ,output_type=lowercase_ ,return_dict=lowercase_ ,callback=lowercase_ ,callback_steps=lowercase_ ,**lowercase_ ,)
# Get first result from Stable Diffusion Checkpoint v1.4
lowerCAmelCase__ : Optional[Any] = self.textaimg_sda_a(
prompt=lowercase_ ,height=lowercase_ ,width=lowercase_ ,num_inference_steps=lowercase_ ,guidance_scale=lowercase_ ,negative_prompt=lowercase_ ,num_images_per_prompt=lowercase_ ,eta=lowercase_ ,generator=lowercase_ ,latents=lowercase_ ,output_type=lowercase_ ,return_dict=lowercase_ ,callback=lowercase_ ,callback_steps=lowercase_ ,**lowercase_ ,)
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
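# A hedged usage sketch (the checkpoint ids above are real; the `custom_pipeline` name is an
# assumption about how a community pipeline like this is typically loaded):
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#     )
#     output = pipe(prompt="a photo of an astronaut riding a horse")
#     images = output.images  # one image per checkpoint, v1.1 through v1.4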
| 450
|
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in member variable.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
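# Illustrative usage (the class below is hypothetical, just to show the caching behavior):
#
#     class Dataset:
#         @cached_property
#         def stats(self):
#             print("computed once")
#             return {"rows": 1_000_000}
#
#     d = Dataset()
#     d.stats  # prints "computed once" and stores the result on the instance
#     d.stats  # served from the cache, no recomputation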
def strtobool(val):
    """
    Convert a string representation of truth to 1 (true) or 0 (false). Raises ValueError if
    `val` is anything else.
    """
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
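# Quick examples (these follow directly from the truth-value sets above):
#
#     strtobool("YES")    # -> 1 (input is lower-cased before matching)
#     strtobool("off")    # -> 0
#     strtobool("maybe")  # -> ValueError: invalid truth value 'maybe'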
def is_tensor(x):
    """
    Tests if `x` is a `torch.Tensor`, `tf.Tensor`, jax array or `np.ndarray`.
    """
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if `x` is a tensorflow symbolic tensor or not (i.e. not eager). Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
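# For example (illustrative; assumes torch is available):
#
#     to_py_obj({"ids": torch.tensor([[1, 2]]), "mask": np.array([1, 0])})
#     # -> {"ids": [[1, 2]], "mask": [1, 0]} -- pure Python containers all the way down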
def to_numpy(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like
    a tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular
    python dictionary.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
        elif first_field is not None:
            self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """
        Convert self to a tuple containing all the attributes/keys that are not `None`.
        """
        return tuple(self[k] for k in self.keys())
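# Behavior sketch for subclasses (`SampleOutput` is hypothetical):
#
#     @dataclass
#     class SampleOutput(ModelOutput):
#         loss: Optional[Any] = None
#         logits: Optional[Any] = None
#
#     out = SampleOutput(logits=[1.0, 2.0])
#     out["logits"], out.logits, out[0]  # dict-style, attribute-style and tuple-style access all work
#     out.to_tuple()                     # -> ([1.0, 2.0],) since `None` fields are skipped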
class ExplicitEnum(str, Enum):
    """
    Enum with more explicit error message for missing values.
    """

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """
    Possible values for the `padding` argument. Useful for tab-completion in an IDE.
    """

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """
    Possible values for the `return_tensors` argument. Useful for tab-completion in an IDE.
    """

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """
    Wrapper for `contextlib.ExitStack` which enters a collection of context managers.
    """

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """
    Check if a given model can return loss.

    Args:
        model_class (`type`): The class of the model.
    """
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False


def find_labels(model_class):
    """
    Find the labels used by a given model.

    Args:
        model_class (`type`): The class of the model.
    """
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d, parent_key="", delimiter="."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
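# Example (a straight application of the helper above):
#
#     flatten_dict({"a": 1, "b": {"c": 2}})         # -> {"a": 1, "b.c": 2}
#     flatten_dict({"b": {"c": 2}}, delimiter="/")  # -> {"b/c": 2}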
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """
    Framework-agnostic version of `numpy.transpose` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """
    Framework-agnostic version of `numpy.reshape` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """
    Framework-agnostic version of `numpy.squeeze` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """
    Framework-agnostic version of `numpy.expand_dims` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """
    Framework-agnostic version of `numpy.size` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays.
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
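# These five helpers share one idea: dispatch on the tensor's framework, then call the native op.
# For instance (illustrative; assumes numpy and torch are importable):
#
#     transpose(np.ones((2, 3)))          # numpy path -> shape (3, 2)
#     reshape(torch.zeros(2, 3), (3, 2))  # torch path -> tensor of shape (3, 2)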
def add_model_info_to_auto_map(auto_map, repo_id):
    """
    Adds the information of the repo_id to a given auto map.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant
    classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
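# Example of the auto-map rewriting above (the repo id is hypothetical):
#
#     add_model_info_to_auto_map({"AutoModel": "modeling.MyModel"}, "user/repo")
#     # -> {"AutoModel": "user/repo--modeling.MyModel"}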
| 450
| 1
|
"""simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)

from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
'compression_format, is_archive' ,[
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] ,)
def test_base_extractors(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bz2_file, Bzip2Extractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lz4_file, Lz4Extractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
'compression_format, is_archive' ,[
('7z', True),
('bz2', False),
('gzip', False),
('lz4', False),
('tar', True),
('xz', False),
('zip', True),
('zstd', False),
] ,)
def test_extractor(
    compression_format,
    is_archive,
    bz2_file,
    gz_file,
    lz4_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bz2_file,
        "gzip": gz_file,
        "lz4": lz4_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, extractor_format, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
    "insecure_tar_file, error_log",
    [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")],
)
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    # We should have fewer false positives than zipfile.is_zipfile,
    # which we achieve by only checking the magic number
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
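# Note on the payload above: the PNG bytes deliberately embed "PK\x05\x06" (the zip
# end-of-central-directory magic), which is what `zipfile.is_zipfile` scans for, while the
# datasets `ZipExtractor` only checks the magic number at the beginning of the file and is
# therefore not fooled.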
| 711
|
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
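# What `accelerator.accumulate(ddp_model)` does here, in sketch form (an informal description,
# not the exact accelerate internals):
#
#     with accelerator.accumulate(model):
#         ...
#     # roughly: skip the gradient all-reduce (as `model.no_sync()` would) on every step except
#     # each `gradient_accumulation_steps`-th one, which is why the grads above only match the
#     # baseline when (iteration + 1) % 2 == 0 or at the end of the dataloader.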
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 254
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
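# Sketch of how this config is consumed (names follow the transformers ONNX export API; the
# checkpoint is illustrative):
#
#     from transformers import MarianMTModel, MarianTokenizer
#     model = MarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#     onnx_config = MarianOnnxConfig(model.config)
#     dummy = onnx_config.generate_dummy_inputs(
#         MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de"), framework=TensorType.PYTORCH
#     )
#     # `dummy` now holds encoder/decoder input ids and attention masks shaped per `inputs` above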
| 24
|
"""simple docstring"""
def bfs(graph, s, t, parent):
    """Return True if the sink `t` is reachable from the source `s` in the residual graph."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and used to store the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
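# For this classic example network the program prints 23, the maximum flow from vertex 0
# (source) to vertex 5 (sink). Tracing a run (illustrative): BFS repeatedly finds augmenting
# paths such as 0 -> 1 -> 3 -> 5 (bottleneck 12), adds each bottleneck to max_flow, and updates
# the residual capacities until no augmenting path remains.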
| 636
| 0
|
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
"""a""": 0.08497,
"""b""": 0.01492,
"""c""": 0.02202,
"""d""": 0.04253,
"""e""": 0.11162,
"""f""": 0.02228,
"""g""": 0.02015,
"""h""": 0.06094,
"""i""": 0.07546,
"""j""": 0.00153,
"""k""": 0.01292,
"""l""": 0.04025,
"""m""": 0.02406,
"""n""": 0.06749,
"""o""": 0.07507,
"""p""": 0.01929,
"""q""": 0.00095,
"""r""": 0.07587,
"""s""": 0.06327,
"""t""": 0.09356,
"""u""": 0.02758,
"""v""": 0.00978,
"""w""": 0.02560,
"""x""": 0.00150,
"""y""": 0.01994,
"""z""": 0.00077,
}
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher: int = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
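# A hedged usage sketch (the ciphertext and decoded text mirror the algorithm's well-known
# doctest; the chi squared value is omitted because it depends on the frequency table):
#
#     shift, chi_squared, decoded = decrypt_caesar_with_chi_squared(
#         "dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!"
#     )
#     # shift == 7, decoded == "why is the caesar cipher so popular? it is too easy to crack!"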
| 634
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
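# Minimal usage sketch (column names shown are the defaults defined above):
#
#     template = Summarization()
#     template.column_mapping  # -> {"text": "text", "summary": "summary"}
#     Summarization(text_column="article", summary_column="highlights").column_mapping
#     # -> {"article": "text", "highlights": "summary"}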
| 634
| 1
|
"""simple docstring"""
def odd_even_sort(input_list: list) -> list:
    """Odd-even transposition sort: a parallel-friendly variant of bubble sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
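# Example run (no interactive input needed):
#
#     odd_even_sort([5, 3, 1, 4, 2])  # -> [1, 2, 3, 4, 5]
#
# Each pass first compares (even, even+1) index pairs, then (odd, odd+1) pairs; the outer loop
# stops once a full pass makes no swaps.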
| 642
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Builds a single random PIL image for the processor tests
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
def UpperCAmelCase ( self : Optional[int] ) -> Dict:
'''simple docstring'''
a__ : str = self.get_image_processor()
a__ : Union[str, Any] = self.get_tokenizer()
a__ : Any = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
a__ : List[str] = self.prepare_image_inputs()
a__ : List[Any] = image_processor(a_ , return_tensors="np" )
a__ : Optional[Any] = processor(images=a_ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self : Tuple ) -> int:
'''simple docstring'''
a__ : Optional[Any] = self.get_image_processor()
a__ : List[Any] = self.get_tokenizer()
a__ : int = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
a__ : List[str] = "test"
a__ : Any = processor(text=a_ )
a__ : Tuple = tokenizer(a_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
a__ : Tuple = self.get_image_processor()
a__ : List[str] = self.get_tokenizer()
a__ : List[Any] = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
a__ : str = "test"
a__ : str = self.prepare_image_inputs()
a__ : Any = processor(text=a_ , images=a_ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def UpperCAmelCase ( self : List[str] ) -> int:
'''simple docstring'''
a__ : Any = self.get_image_processor()
a__ : Tuple = self.get_tokenizer()
a__ : Dict = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
a__ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
a__ : Any = processor.char_decode(a_ )
a__ : str = tokenizer.batch_decode(a_ )
a__ : Union[str, Any] = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(a_ , a_ )
def UpperCAmelCase ( self : List[Any] ) -> Dict:
'''simple docstring'''
a__ : List[str] = self.get_image_processor()
a__ : Any = self.get_tokenizer()
a__ : Any = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
a__ : str = None
a__ : Optional[Any] = self.prepare_image_inputs()
a__ : Optional[int] = processor(text=a_ , images=a_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
a__ : List[Any] = self.get_image_processor()
a__ : List[str] = self.get_tokenizer()
a__ : Optional[Any] = MgpstrProcessor(tokenizer=a_ , image_processor=a_ )
a__ : List[str] = torch.randn(1 , 27 , 38 )
a__ : Tuple = torch.randn(1 , 27 , 5_02_57 )
a__ : List[Any] = torch.randn(1 , 27 , 3_05_22 )
a__ : Optional[Any] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 642
| 1
|
"""simple docstring"""
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    return sum(map(int, str(factorial(num))))
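# Worked example for the function above:
# solution(10) == 27, because 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.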
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 122
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        self.num_patches = (self.image_size // 32) ** 2
        self.seq_length = self.num_patches + 1
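        # Worked example with the defaults above: image_size=64 gives a backbone
        # feature map of 64/32 = 2, i.e. (64 // 32) ** 2 = 4 patches, so
        # seq_length = 4 + 1 = 5 once the [CLS] token is prepended.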
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| 122
| 1
|
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
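# A minimal usage sketch, assuming the HashTable base class from .hash_table takes a
# table size and exposes an inherited insert method (names here follow that sibling
# module and are otherwise hypothetical):
# ht = HashTableWithLinkedList(3)
# ht.insert_data(17)  # each slot holds a deque, so colliding keys chain their values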
| 334
|
'''simple docstring'''
def pancake_sort(arr):
    """Sort `arr` by repeatedly flipping prefixes of the list."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the first `cur` elements, moving the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
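# Worked trace on [3, 1, 2]:
#   cur=3: max of [3, 1, 2] is at index 0; the 1-prefix flip leaves [3, 1, 2],
#          then the 3-prefix flip gives [2, 1, 3].
#   cur=2: max of [2, 1] is at index 0; the 1-flip is a no-op, the 2-flip gives [1, 2, 3].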
if __name__ == "__main__":
__A : List[Any] = input('Enter numbers separated by a comma:\n').strip()
__A : str = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
| 334
| 1
|
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}

        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False
    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.int32 )  # noqa: E231
        # fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| 713
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
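# With the default conv_stride of (5, 2, 2, 2, 2, 2, 2) the property above evaluates to
# 5 * 2**6 = 320, i.e. the feature encoder emits one frame per 320 input audio samples.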
| 545
| 0
|
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)

        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configpath = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_configpath)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests(self):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 52
|
'''simple docstring'''
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """
    Return a decimal number as a (numerator, denominator) pair in lowest terms.
    """
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
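# The while-loop above is Euclid's algorithm: for 6.25 it reduces 625/100 with
# gcd(625, 100) = 25, so decimal_to_fraction("6.25") returns (25, 4).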
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
print(f'{decimal_to_fraction("78td") = }')
| 399
| 0
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImgaImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 711
|
'''simple docstring'''
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: list = [3, 3, 5, 3, 5, 5, 3],
        in_channels: list = [32, 16, 24, 40, 80, 112, 192],
        out_channels: list = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: list = [],
        strides: list = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: list = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: list = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2_560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.0_01,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 539
| 0
|
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping nodes
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next

            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next

            if node_1 is None or node_2 is None:
                return

            # Swap the data fields only; the node objects stay in place
            node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
_lowerCamelCase = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
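# Expected output of the driver above (pushing 5..1 prepends, so the list reads 1..5,
# and swap_nodes(1, 4) exchanges the data of those two nodes):
#   1 2 3 4 5
#   After swapping
#   4 2 3 1 5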
| 71
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_lowerCamelCase = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log and save metrics for the given split (train, val, or test)."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , _SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )
# use task specific params
use_task_specific_params(_SCREAMING_SNAKE_CASE , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
UpperCAmelCase_ : Dict = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_SCREAMING_SNAKE_CASE , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Dict = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
UpperCAmelCase_ : List[Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_SCREAMING_SNAKE_CASE )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
UpperCAmelCase_ : Dict = SeqaSeqDataset
# Get datasets
UpperCAmelCase_ : Tuple = (
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
UpperCAmelCase_ : Dict = (
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
UpperCAmelCase_ : int = (
dataset_class(
_SCREAMING_SNAKE_CASE , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
UpperCAmelCase_ : Optional[Any] = (
build_compute_metrics_fn(data_args.task , _SCREAMING_SNAKE_CASE ) if training_args.predict_with_generate else None
)
UpperCAmelCase_ : List[str] = SeqaSeqTrainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , data_args=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , data_collator=SeqaSeqDataCollator(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ : List[Any] = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
UpperCAmelCase_ : Any = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
UpperCAmelCase_ : int = train_result.metrics
UpperCAmelCase_ : Dict = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCAmelCase_ : Union[str, Any] = trainer.evaluate(metric_key_prefix="val" )
UpperCAmelCase_ : Optional[Any] = data_args.n_val
UpperCAmelCase_ : Union[str, Any] = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
if training_args.do_predict:
logger.info("*** Predict ***" )
UpperCAmelCase_ : List[Any] = trainer.predict(test_dataset=_SCREAMING_SNAKE_CASE , metric_key_prefix="test" )
UpperCAmelCase_ : List[str] = test_output.metrics
UpperCAmelCase_ : int = data_args.n_test
if trainer.is_world_process_zero():
UpperCAmelCase_ : Optional[Any] = round(metrics["test_loss"] , 4 )
handle_metrics("test" , _SCREAMING_SNAKE_CASE , training_args.output_dir )
all_metrics.update(_SCREAMING_SNAKE_CASE )
if training_args.predict_with_generate:
UpperCAmelCase_ : Optional[int] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = lmap(str.strip , _SCREAMING_SNAKE_CASE )
write_txt_file(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(_SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def a__ ( _SCREAMING_SNAKE_CASE : str ) -> Optional[int]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
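# A minimal invocation sketch (paths and hyperparameter values below are hypothetical;
# the flags map onto the dataclass fields and Seq2SeqTrainingArguments used above):
#
#   python finetune_trainer.py \
#       --model_name_or_path facebook/mbart-large-cc25 \
#       --data_dir ./wmt_en_ro --output_dir ./mbart_en_ro \
#       --src_lang en_XX --tgt_lang ro_RO \
#       --do_train --do_eval --predict_with_generate \
#       --n_val 500 --eval_beams 5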
| 71
| 1
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
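# Note on the fixture above: BlenderbotSmall BPE marks non-final subwords with a
# trailing "@@" (so "apte" -> ["ap@@", "te"]), and decoding strips the marker,
# lowercases, and re-spaces punctuation ("I am a small frog." -> "i am a small frog .").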
| 703
|
import argparse
import re

import requests
import torch

# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode

from transformers import (
    BertTokenizer,
    BlipConfig,
    BlipForConditionalGeneration,
    BlipForImageTextRetrieval,
    BlipForQuestionAnswering,
)


def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)

    return key
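# Worked example, hand-traced through the substitutions above:
#   rename_key("visual_encoder.blocks.0.attn.qkv.weight")
#   -> "vision_model.encoder.layers.0.self_attn.qkv.weight"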
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"

    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()

    # The function only takes the dump folder and config path; the original BLIP
    # checkpoints are always downloaded from the hardcoded URLs above.
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
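# Example run (the script name and output path are hypothetical):
#   python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-hf
# This produces three checkpoints: ./blip-hf, ./blip-hf_vqa and ./blip-hf_itm.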
| 499
| 0
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
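# The registration pattern exercised above, in miniature (CustomConfig and
# CustomImageProcessor come from the local test_module package; any unused
# model_type string works as the key):
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#   processor = AutoImageProcessor.from_pretrained(path_to_saved_custom_processor)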
| 87
|
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
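# Example run (file paths are hypothetical):
#   python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5-hifigan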
| 87
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
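# A minimal usage sketch (the values shown are the defaults reconstructed above):
#   config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
#   onnx_config = YolosOnnxConfig(config)  # exports a single dynamic pixel_values input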
| 20
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
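# Sketch of how generate_dummy_inputs is typically driven during ONNX export (the
# processor checkpoint and TensorType usage are assumptions, not part of this file):
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   onnx_config = LayoutLMv3OnnxConfig(config, task="question-answering")
#   dummy_inputs = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)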
| 20
| 1
|
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data; property names match the corresponding model inputs."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        # Use the cross-entropy ignore_index as the padding label id so that only
        # real label ids contribute to the loss later.
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
    import tensorflow as tf

    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        # Use the cross-entropy ignore_index (-100) as the padding label id so that
        # only real label ids contribute to the loss later.
        pad_token_label_id: int = -100

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))

            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
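# Rough shape of the data this module expects (label values are illustrative CoNLL-style tags):
#   InputExample(guid="train-1", words=["EU", "rejects", "German", "call"],
#                labels=["B-ORG", "O", "B-MISC", "O"])
# convert_examples_to_features() turns each example into fixed-length id lists where
# padding and subword continuations carry pad_token_label_id (-100), so they are
# ignored by the cross-entropy loss.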
| 267
|
def is_arithmetic_series(series: list) -> bool:
    """Return True if the input list is an arithmetic series, e.g. [2, 4, 6]."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the input list."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
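# Example calls (results worked by hand):
#   is_arithmetic_series([2, 4, 6])  -> True   (common difference 2)
#   is_arithmetic_series([2, 4, 7])  -> False
#   arithmetic_mean([2, 4, 6])       -> 4.0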
| 267
| 1
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 202
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 202
| 1
|
"""simple docstring"""
def lowerCamelCase__ ( UpperCAmelCase_ )-> int:
"""simple docstring"""
if not grid or not grid[0]:
raise TypeError("The grid does not contain the appropriate information" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
UpperCamelCase = grid[0]
for row_n in range(1 , len(UpperCAmelCase_ ) ):
UpperCamelCase = grid[row_n]
UpperCamelCase = fill_row(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase = grid[row_n]
return grid[-1][-1]
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ )-> list:
"""simple docstring"""
current_row[0] += row_above[0]
for cell_n in range(1 , len(UpperCAmelCase_ ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
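# Worked example: for the grid below the cheapest top-left -> bottom-right path is
# 1 -> 3 -> 1 -> 1 -> 1, so the result is 7.
#   min_path_sum([[1, 3, 1],
#                 [1, 5, 1],
#                 [4, 2, 1]])  # -> 7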
| 554
|
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def lowerCamelCase__ ( UpperCAmelCase_ )-> Tuple:
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> List[str]:
"""simple docstring"""
return max(metric_fn(UpperCAmelCase_ , UpperCAmelCase_ ) for gt in ground_truths )
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> List[Any]:
"""simple docstring"""
UpperCamelCase = [line.strip() for line in open(UpperCAmelCase_ , "r" ).readlines()]
UpperCamelCase = []
if args.gold_data_mode == "qa":
UpperCamelCase = pd.read_csv(UpperCAmelCase_ , sep="\t" , header=UpperCAmelCase_ )
for answer_list in data[1]:
UpperCamelCase = ast.literal_eval(UpperCAmelCase_ )
answers.append(UpperCAmelCase_ )
else:
UpperCamelCase = [line.strip() for line in open(UpperCAmelCase_ , "r" ).readlines()]
UpperCamelCase = [[reference] for reference in references]
UpperCamelCase = UpperCamelCase = UpperCamelCase = 0
for prediction, ground_truths in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
total += 1
em += metric_max_over_ground_truths(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
fa += metric_max_over_ground_truths(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase = 100.0 * em / total
UpperCamelCase = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> List[Any]:
"""simple docstring"""
UpperCamelCase = args.k
UpperCamelCase = [line.strip() for line in open(UpperCAmelCase_ , "r" ).readlines()]
UpperCamelCase = [line.strip() for line in open(UpperCAmelCase_ , "r" ).readlines()]
UpperCamelCase = UpperCamelCase = 0
for hypo, reference in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCamelCase = set(hypo.split("\t" )[:k] )
UpperCamelCase = set(reference.split("\t" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
UpperCamelCase = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> Optional[int]:
"""simple docstring"""
def strip_title(UpperCAmelCase_ ):
if title.startswith("\"" ):
UpperCamelCase = title[1:]
if title.endswith("\"" ):
UpperCamelCase = title[:-1]
return title
UpperCamelCase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
UpperCAmelCase_ , return_tensors="pt" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , )["input_ids"].to(args.device )
UpperCamelCase = rag_model.rag.question_encoder(UpperCAmelCase_ )
UpperCamelCase = question_enc_outputs[0]
UpperCamelCase = rag_model.retriever(
UpperCAmelCase_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
UpperCamelCase = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
UpperCamelCase = []
for docs in all_docs:
UpperCamelCase = [strip_title(UpperCAmelCase_ ) for title in docs["title"]]
provenance_strings.append("\t".join(UpperCAmelCase_ ) )
return provenance_strings
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> Optional[int]:
"""simple docstring"""
with torch.no_grad():
UpperCamelCase = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
UpperCAmelCase_ , return_tensors="pt" , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )
UpperCamelCase = inputs_dict.input_ids.to(args.device )
UpperCamelCase = inputs_dict.attention_mask.to(args.device )
UpperCamelCase = rag_model.generate( # rag_model overwrites generate
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=UpperCAmelCase_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
UpperCamelCase = rag_model.retriever.generator_tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
if args.print_predictions:
for q, a in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
logger.info("Q: {} - A: {}".format(UpperCAmelCase_ , UpperCAmelCase_ ) )
return answers
def lowerCamelCase__ ( )-> Any:
"""simple docstring"""
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=UpperCAmelCase_ , help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
) , )
parser.add_argument(
"--index_name" , default=UpperCAmelCase_ , choices=["exact", "compressed", "legacy"] , type=UpperCAmelCase_ , help="RAG model retriever type" , )
parser.add_argument(
"--index_path" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="Path to the retrieval index" , )
parser.add_argument("--n_docs" , default=5 , type=UpperCAmelCase_ , help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
parser.add_argument(
"--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=UpperCAmelCase_ , help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
) , )
parser.add_argument("--k" , default=1 , type=UpperCAmelCase_ , help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to a file containing evaluation samples" , )
parser.add_argument(
"--gold_data_path" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to a tab-separated file with gold samples" , )
parser.add_argument(
"--gold_data_mode" , default="qa" , type=UpperCAmelCase_ , choices=["qa", "ans"] , help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
) , )
parser.add_argument(
"--predictions_path" , type=UpperCAmelCase_ , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
parser.add_argument(
"--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
parser.add_argument(
"--eval_batch_size" , default=8 , type=UpperCAmelCase_ , help="Batch size per GPU/CPU for evaluation." , )
parser.add_argument(
"--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
parser.add_argument(
"--num_beams" , default=4 , type=UpperCAmelCase_ , help="Number of beams to be used when generating answers" , )
parser.add_argument("--min_length" , default=1 , type=UpperCAmelCase_ , help="Min length of the generated answers" )
parser.add_argument("--max_length" , default=50 , type=UpperCAmelCase_ , help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
parser.add_argument(
"--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
UpperCamelCase = parser.parse_args()
UpperCamelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
    main(args)
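# --- Illustrative addition (not part of the original script) ---
# A minimal sketch of parsing one line of the "qa" gold-data format described
# by --gold_data_mode above, i.e. "question<TAB>answer_list". The helper name
# and the list-literal encoding of answer_list are assumptions for illustration.
import ast


def parse_qa_gold_line(line: str) -> tuple:
    question, answers = line.rstrip("\n").split("\t", 1)
    return question, ast.literal_eval(answers)  # e.g. "['Paris', 'paris']" -> list


assert parse_qa_gold_line("capital of France\t['Paris']") == ("capital of France", ["Paris"])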
| 554
| 1
|
"""simple docstring"""
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
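# --- Illustrative addition (not part of the original tests) ---
# A standalone sketch of the pattern the jitted tests above exercise: wrapping a
# Flax model's forward pass in jax.jit so repeated calls reuse the compiled
# function. The model name is just an example.
import jax
from transformers import AutoTokenizer, FlaxAutoModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = FlaxAutoModel.from_pretrained("bert-base-cased")


@jax.jit
def forward(**kwargs):
    return model(**kwargs)


inputs = tokenizer("Do you support jax jitted function?", return_tensors="jax")
# block_until_ready() forces JAX's async dispatch to finish before timing/asserting
forward(**inputs).last_hidden_state.block_until_ready()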
| 463
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
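# --- Illustrative addition (not part of the original module) ---
# A minimal usage sketch for the two classes above.
config = LevitConfig(image_size=224, hidden_sizes=[128, 256, 384])
onnx_config = LevitOnnxConfig(config)
print(onnx_config.inputs)               # OrderedDict mapping "pixel_values" to its dynamic axes
print(onnx_config.atol_for_validation)  # 1e-4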
| 463
| 1
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)
@require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is no hf-internal-testing tiny model for either GLPN or DPT")
| 103
|
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
| 181
| 0
|
class Graph:
    def __init__(self) -> None:
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
__A : Union[str, Any] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
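# --- Illustrative addition (not part of the original file) ---
# A minimal sketch of the same traversal done without recursion, using an
# explicit stack over the Graph class defined above. Unlike dfs_recursive,
# this follows the adjacency lists, though it prints the same order here.
def dfs_iterative(graph: Graph) -> None:
    visited = [False] * len(graph.vertex)
    for start in range(len(graph.vertex)):
        if visited[start]:
            continue
        stack = [start]
        while stack:
            node = stack.pop()
            if visited[node]:
                continue
            visited[node] = True
            print(node, end=" ")
            # push neighbors in reverse so they pop in insertion order
            stack.extend(reversed(graph.vertex.get(node, [])))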
| 75
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
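# --- Illustrative addition (not part of the original module) ---
# Minimal usage sketch for the ONNX export config defined above.
config = AlbertConfig()
onnx_config = AlbertOnnxConfig(config, task="default")
print(list(onnx_config.inputs))  # ['input_ids', 'attention_mask', 'token_type_ids']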
| 75
| 1
|
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        raise ValueError(f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}")

    if cols2 != 1:
        raise ValueError(f"Constant matrix must be nx1 but received {rows2}x{cols2}")

    if rows1 != rows2:
        raise ValueError(
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )

    if len(init_val) != rows1:
        raise ValueError(
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)

    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
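# --- Illustrative addition (not part of the original file) ---
# A small worked example: the strictly diagonally dominant system
#   4x +  y = 2
#    x + 3y = -2
# iterated from (0, 0) converges toward x ≈ 0.727, y ≈ -0.909.
coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
constant = np.array([[2.0], [-2.0]])
print(jacobi_iteration_method(coefficient, constant, [0, 0], 25))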
| 70
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def A ( self : str , A : Dict , A : Optional[int] , A : List[Any] , A : List[Any] , A : int , A : List[str] , A : int , A : List[Any] , A : int , ) -> Tuple:
lowercase_ : str = True
lowercase_ : str = LlamaModel(A )
model.to(A )
model.eval()
lowercase_ : str = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
lowercase_ : Tuple = model(
A , attention_mask=A , encoder_hidden_states=A , )
lowercase_ : Dict = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Tuple , A : Optional[Any] , A : Optional[int] , A : Union[str, Any] , A : Union[str, Any] , A : Dict , A : Optional[int] , A : Union[str, Any] , A : List[Any] , A : List[Any] , ) -> Tuple:
lowercase_ : Optional[Any] = LlamaForCausalLM(config=A )
model.to(A )
model.eval()
lowercase_ : Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Any , A : List[str] , A : Dict , A : Dict , A : int , A : Any , A : Optional[int] , A : str , A : Dict , A : Optional[Any] , ) -> int:
lowercase_ : Any = True
lowercase_ : str = True
lowercase_ : List[str] = LlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
lowercase_ : Tuple = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
lowercase_ : str = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase_ : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase_ : List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase_ : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase_ : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase_ : Dict = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )['''hidden_states'''][0]
lowercase_ : Dict = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )['''hidden_states'''][0]
# select random slice
lowercase_ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase_ : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
def A ( self : int ) -> Optional[int]:
lowercase_ , lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Optional[Any] = 3
lowercase_ : Dict = input_dict['''input_ids''']
lowercase_ : List[str] = input_ids.ne(1 ).to(A )
lowercase_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase_ : int = LlamaForSequenceClassification(A )
model.to(A )
model.eval()
lowercase_ : int = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : int ) -> Optional[int]:
lowercase_ , lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[Any] = 3
lowercase_ : Tuple = '''single_label_classification'''
lowercase_ : str = input_dict['''input_ids''']
lowercase_ : Any = input_ids.ne(1 ).to(A )
lowercase_ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase_ : Any = LlamaForSequenceClassification(A )
model.to(A )
model.eval()
lowercase_ : List[Any] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : Any ) -> Union[str, Any]:
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Tuple = 3
lowercase_ : int = '''multi_label_classification'''
lowercase_ : Optional[Any] = input_dict['''input_ids''']
lowercase_ : Dict = input_ids.ne(1 ).to(A )
lowercase_ : Optional[int] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase_ : Optional[Any] = LlamaForSequenceClassification(A )
model.to(A )
model.eval()
lowercase_ : Dict = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def A ( self : Union[str, Any] ) -> Dict:
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def A ( self : int , A : int ) -> Optional[int]:
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : str = ids_tensor([1, 10] , config.vocab_size )
lowercase_ : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase_ : Optional[Any] = LlamaModel(A )
original_model.to(A )
original_model.eval()
lowercase_ : List[str] = original_model(A ).last_hidden_state
lowercase_ : int = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase_ : List[Any] = {'''type''': scaling_type, '''factor''': 10.0}
lowercase_ : int = LlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
lowercase_ : Union[str, Any] = scaled_model(A ).last_hidden_state
lowercase_ : Optional[int] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
@slow
def A ( self : List[str] ) -> List[str]:
lowercase_ : Union[str, Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
lowercase_ : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
lowercase_ : List[Any] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowercase_ : Optional[int] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase_ : Optional[int] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , A , atol=1e-5 , rtol=1e-5 )
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
@slow
def A ( self : Tuple ) -> str:
lowercase_ : Optional[Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
lowercase_ : Union[str, Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
lowercase_ : Tuple = model(torch.tensor(A ) )
# Expected mean on dim = -1
lowercase_ : Optional[Any] = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase_ : Union[str, Any] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , A , atol=1e-5 , rtol=1e-5 )
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
@slow
def A ( self : List[Any] ) -> Dict:
lowercase_ : Union[str, Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
lowercase_ : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
lowercase_ : List[Any] = model(torch.tensor(A ) )
# Expected mean on dim = -1
lowercase_ : List[str] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , A , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowercase_ : Dict = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , A , atol=1e-2 , rtol=1e-2 )
    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
@slow
def A ( self : Union[str, Any] ) -> Optional[Any]:
lowercase_ : List[str] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
lowercase_ : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
lowercase_ : Union[str, Any] = model(torch.tensor(A ) )
lowercase_ : Any = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , A , atol=1e-2 , rtol=1e-2 )
# fmt: off
lowercase_ : Optional[Any] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , A , atol=1e-5 , rtol=1e-5 )
    @unittest.skip("Model is currently gated")
@slow
def A ( self : str ) -> Tuple:
lowercase_ : List[str] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
lowercase_ : Any = '''Simply put, the theory of relativity states that '''
lowercase_ : Optional[Any] = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
lowercase_ : Union[str, Any] = tokenizer.encode(A , return_tensors='''pt''' )
lowercase_ : List[Any] = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=A )
# greedy generation outputs
lowercase_ : List[str] = model.generate(A , max_new_tokens=64 , top_p=A , temperature=1 , do_sample=A )
lowercase_ : Union[str, Any] = tokenizer.decode(generated_ids[0] , skip_special_tokens=A )
self.assertEqual(A , A )
| 231
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
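# --- Illustrative addition (not part of the original module) ---
# A hedged sketch of what the _LazyModule registration above buys: importing
# the package is cheap, and heavy submodules only load on attribute access.
# Assumes an installed `transformers`; type/attribute details may vary by version.
import importlib

mobilebert = importlib.import_module("transformers.models.mobilebert")
print(type(mobilebert).__name__)               # typically "_LazyModule"
print(mobilebert.MobileBertConfig.model_type)  # "mobilebert" (triggers the real import)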
| 664
|
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
UpperCAmelCase_ = {"UserAgent": UserAgent().random}
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->dict:
_lowerCAmelCase = script.contents[0]
_lowerCAmelCase = json.loads(data[data.find('''{"config"''' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class UpperCAmelCase :
def __init__( self , _lowerCAmelCase ):
_lowerCAmelCase = F'''https://www.instagram.com/{username}/'''
_lowerCAmelCase = self.get_json()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = requests.get(self.url , headers=_lowerCAmelCase ).text
_lowerCAmelCase = BeautifulSoup(_lowerCAmelCase , '''html.parser''' ).find_all('''script''' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
return F'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ):
return F'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def __lowerCAmelCase ( self ):
return self.user_data["username"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["full_name"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["biography"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["business_email"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["external_url"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["edge_followed_by"]["count"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["edge_follow"]["count"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["profile_pic_url_hd"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["is_verified"]
@property
def __lowerCAmelCase ( self ):
return self.user_data["is_private"]
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : str = "github" )->None:
import os
if os.environ.get('''CI''' ):
return # test failing on GitHub Actions
_lowerCAmelCase = InstagramUser(_SCREAMING_SNAKE_CASE )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , _SCREAMING_SNAKE_CASE )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('''https://instagram.''' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 664
| 1
|
'''simple docstring'''
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin test; bounds are valid up to ~3.32e24."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True


def test_miller_rabin() -> None:
    """Testing a nontrivial (ends in 1, 3, 7, 9) composite
    and a prime in each range.
    """
assert not miller_rabin(561)
assert miller_rabin(563)
# 2047
assert not miller_rabin(838_201)
assert miller_rabin(838_207)
# 1_373_653
assert not miller_rabin(17_316_001)
assert miller_rabin(17_316_017)
# 25_326_001
assert not miller_rabin(3_078_386_641)
assert miller_rabin(3_078_386_653)
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801)
assert miller_rabin(1_713_045_574_819)
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307)
assert miller_rabin(2_779_799_728_327)
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441)
assert miller_rabin(113_850_023_909_527)
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351)
assert miller_rabin(1_275_041_018_848_804_391)
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867)
assert miller_rabin(79_666_464_458_507_787_791_951)
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333)
assert miller_rabin(552_840_677_446_647_897_660_359)
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
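# --- Illustrative addition (not part of the original file) ---
# A worked example of the decomposition miller_rabin relies on:
# n - 1 = d * 2**s with d odd. For n = 561, n - 1 = 560 = 35 * 2**4.
n = 561
d, s = n - 1, 0
while d % 2 == 0:
    d //= 2
    s += 1
assert (d, s) == (35, 4)
assert miller_rabin(561) is False  # Carmichael number, correctly rejected
assert miller_rabin(563) is True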
| 11
|
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # overlap will be of doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,  # assumption: special tokens must not be counted when re-indexing
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60% of the no-answer ("null") samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")

    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
| 118
| 0
|
def is_sum_subset(arr: list, required_sum: int) -> bool:
    """
    >>> is_sum_subset([2, 4, 6, 8], 5)
    False
    >>> is_sum_subset([2, 4, 6, 8], 14)
    True
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
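# --- Illustrative addition (not part of the original file) ---
# The table above only ever reads row i-1, so the same check fits in a single
# 1-D row; a minimal space-optimized sketch of the identical recurrence.
def is_sum_subset_1d(arr: list, required_sum: int) -> bool:
    reachable = [False] * (required_sum + 1)
    reachable[0] = True  # the empty subset sums to 0
    for value in arr:
        # iterate right-to-left so each value is used at most once
        for j in range(required_sum, value - 1, -1):
            reachable[j] = reachable[j] or reachable[j - value]
    return reachable[required_sum]


assert is_sum_subset_1d([2, 4, 6, 8], 5) is False
assert is_sum_subset_1d([2, 4, 6, 8], 14) is True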
| 702
|
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
A_ : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
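    # Example invocation (hypothetical file paths):
    #   python convert_t5_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./t5/model.ckpt \
    #       --config_file ./t5/config.json \
    #       --pytorch_dump_path ./t5-pytorch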
| 32
| 0
|
'''simple docstring'''
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]
def topological_sort(start, visited, sort):
    """Perform a recursive depth-first topological sort starting from `start`."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertex in vertices:
            if vertex not in visited:
                sort = topological_sort(vertex, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
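    # Sanity check (illustrative): for every edge u -> v, v must appear before u
    # in this post-order list.
    for u, targets in edges.items():
        for v in targets:
            assert sort.index(v) < sort.index(u)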
| 394
|
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)
def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)
def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)
def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)
def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)
def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)
def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)
def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)
def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)
def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)
def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)
def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)
def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)
def require_torch_min_1_12(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)
def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)
def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)
def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)
def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
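# Usage sketch (illustrative): the decorators above compose like any unittest
# skip decorator, e.g.
#
#   class BigModelTests(unittest.TestCase):
#       @slow
#       @require_cuda
#       def test_generation_on_gpu(self):
#           ...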
class TempDirTestCase(unittest.TestCase):
    """TestCase that keeps one temporary directory open for the whole class and
    wipes its contents between tests."""
    clear_on_setup = True
    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)
    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    """TestCase that resets the accelerator state between tests."""
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    """TestCase that starts the given mocks and stops them after each test."""
    def setUp(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
        return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
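# Usage sketch (illustrative command): run a trivial Python one-liner and
# capture its stdout via `run_command`.
if __name__ == "__main__":
    print(run_command([sys.executable, "-c", "print('hello')"], return_stdout=True))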
| 394
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json',
'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"
    def __init__(
        self,
        vocab_size=250_880,
        hidden_size=2_560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10_240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
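# Illustrative usage (defaults as defined above): instantiate the config and
# inspect a few of its fields.
if __name__ == "__main__":
    config = XLMRobertaXLConfig()
    print(config.model_type, config.hidden_size, config.num_hidden_layers)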
| 580
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__)
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'summarization'
_lowerCamelCase = ['loss']
_lowerCamelCase = ROUGE_KEYS
_lowerCamelCase = 'rouge2'
def __init__( self , lowercase_ , **lowercase_ ):
if hparams.sortish_sampler and hparams.gpus > 1:
_snake_case : List[Any] = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" )
if hparams.sortish_sampler:
raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" )
super().__init__(lowercase_ , num_labels=lowercase_ , mode=self.mode , **lowercase_ )
use_task_specific_params(self.model , "summarization" )
save_git_info(self.hparams.output_dir )
_snake_case : Union[str, Any] = Path(self.output_dir ) / "metrics.json"
_snake_case : Tuple = Path(self.output_dir ) / "hparams.pkl"
pickle_save(self.hparams , self.hparams_save_path )
_snake_case : Union[str, Any] = 0
_snake_case : List[str] = defaultdict(lowercase_ )
_snake_case : Union[str, Any] = self.config.model_type
_snake_case : int = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
_snake_case : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
_snake_case : str = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
_snake_case : str = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
_snake_case : Optional[Any] = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f"""target_lens: {self.target_lens}"""
assert self.target_lens["train"] <= self.target_lens["test"], f"""target_lens: {self.target_lens}"""
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
_snake_case : List[Any] = get_git_info()["repo_sha"]
_snake_case : Optional[Any] = hparams.num_workers
_snake_case : List[str] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase_ ):
_snake_case : Tuple = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
_snake_case : int = self.decoder_start_token_id
_snake_case : Any = (
SeqaSeqDataset if hasattr(self.tokenizer , "prepare_seq2seq_batch" ) else LegacySeqaSeqDataset
)
_snake_case : int = False
_snake_case : str = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
_snake_case : int = self.hparams.eval_max_gen_length
else:
_snake_case : List[str] = self.model.config.max_length
_snake_case : str = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Optional[int] = {
k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items()
}
save_json(lowercase_ , Path(self.output_dir ) / "text_batch.json" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / "tok_batch.json" )
_snake_case : List[Any] = True
return readable_batch
def UpperCamelCase ( self , lowercase_ , **lowercase_ ):
return self.model(lowercase_ , **lowercase_ )
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Optional[Any] = self.tokenizer.batch_decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
return lmap(str.strip , lowercase_ )
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Any = self.tokenizer.pad_token_id
_snake_case ,_snake_case : List[Any] = batch["input_ids"], batch["attention_mask"]
_snake_case : Optional[int] = batch["labels"]
if isinstance(self.model , lowercase_ ):
_snake_case : Optional[int] = self.model._shift_right(lowercase_ )
else:
_snake_case : Union[str, Any] = shift_tokens_right(lowercase_ , lowercase_ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
_snake_case : Union[str, Any] = decoder_input_ids
self.save_readable_batch(lowercase_ )
_snake_case : List[str] = self(lowercase_ , attention_mask=lowercase_ , decoder_input_ids=lowercase_ , use_cache=lowercase_ )
_snake_case : Optional[int] = outputs["logits"]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
_snake_case : List[str] = nn.CrossEntropyLoss(ignore_index=lowercase_ )
assert lm_logits.shape[-1] == self.vocab_size
_snake_case : Union[str, Any] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
_snake_case : Optional[int] = nn.functional.log_softmax(lowercase_ , dim=-1 )
_snake_case ,_snake_case : Tuple = label_smoothed_nll_loss(
lowercase_ , lowercase_ , self.hparams.label_smoothing , ignore_index=lowercase_ )
return (loss,)
@property
def UpperCamelCase ( self ):
return self.tokenizer.pad_token_id
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : Optional[Any] = self._step(lowercase_ )
_snake_case : str = dict(zip(self.loss_names , lowercase_ ) )
# tokens per batch
_snake_case : str = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum()
_snake_case : Union[str, Any] = batch["input_ids"].shape[0]
_snake_case : str = batch["input_ids"].eq(self.pad ).sum()
_snake_case : Union[str, Any] = batch["input_ids"].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
return self._generative_step(lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_="val" ):
self.step_count += 1
_snake_case : Optional[Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
_snake_case : List[str] = losses["loss"]
_snake_case : str = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
_snake_case : Tuple = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
_snake_case : torch.FloatTensor = torch.tensor(lowercase_ ).type_as(lowercase_ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowercase_ )
_snake_case : Optional[int] = {f"""{prefix}_avg_{k}""": x for k, x in losses.items()}
_snake_case : Optional[Any] = self.step_count
self.metrics[prefix].append(lowercase_ ) # callback writes this to self.metrics_save_path
_snake_case : str = flatten_list([x["preds"] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f"""{prefix}_loss""": loss,
f"""{prefix}_{self.val_metric}""": metric_tensor,
}
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
return calculate_rouge(lowercase_ , lowercase_ )
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Tuple = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
_snake_case : Optional[Any] = self.model.generate(
batch["input_ids"] , attention_mask=batch["attention_mask"] , use_cache=lowercase_ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
_snake_case : int = (time.time() - ta) / batch["input_ids"].shape[0]
_snake_case : List[str] = self.ids_to_clean_text(lowercase_ )
_snake_case : List[str] = self.ids_to_clean_text(batch["labels"] )
_snake_case : Tuple = self._step(lowercase_ )
_snake_case : Optional[Any] = dict(zip(self.loss_names , lowercase_ ) )
_snake_case : Dict = self.calc_generative_metrics(lowercase_ , lowercase_ )
_snake_case : int = np.mean(lmap(lowercase_ , lowercase_ ) )
base_metrics.update(gen_time=lowercase_ , gen_len=lowercase_ , preds=lowercase_ , target=lowercase_ , **lowercase_ )
return base_metrics
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
return self._generative_step(lowercase_ )
def UpperCamelCase ( self , lowercase_ ):
return self.validation_epoch_end(lowercase_ , prefix="test" )
def UpperCamelCase ( self , lowercase_ ):
_snake_case : Tuple = self.n_obs[type_path]
_snake_case : Any = self.target_lens[type_path]
_snake_case : Tuple = self.dataset_class(
self.tokenizer , type_path=lowercase_ , n_obs=lowercase_ , max_target_length=lowercase_ , **self.dataset_kwargs , )
return dataset
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ = False ):
_snake_case : Tuple = self.get_dataset(lowercase_ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
_snake_case : int = dataset.make_sortish_sampler(lowercase_ , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase_ , batch_size=lowercase_ , collate_fn=dataset.collate_fn , shuffle=lowercase_ , num_workers=self.num_workers , sampler=lowercase_ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
_snake_case : Any = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowercase_ , batch_sampler=lowercase_ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowercase_ , batch_size=lowercase_ , collate_fn=dataset.collate_fn , shuffle=lowercase_ , num_workers=self.num_workers , sampler=lowercase_ , )
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = self.get_dataloader("train" , batch_size=self.hparams.train_batch_size , shuffle=lowercase_ )
return dataloader
def UpperCamelCase ( self ):
return self.get_dataloader("val" , batch_size=self.hparams.eval_batch_size )
def UpperCamelCase ( self ):
return self.get_dataloader("test" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def UpperCamelCase ( lowercase_ , lowercase_ ):
BaseTransformer.add_model_specific_args(lowercase_ , lowercase_ )
add_generic_args(lowercase_ , lowercase_ )
parser.add_argument(
"--max_source_length" , default=1_024 , type=lowercase_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
        parser.add_argument(
            "--max_target_length" , default=56 , type=lowercase_ , help=(
                "The maximum total output sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument(
            "--val_max_target_length" , default=142 , type=lowercase_ , help=(
                "The maximum total output sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument(
            "--test_max_target_length" , default=142 , type=lowercase_ , help=(
                "The maximum total output sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
parser.add_argument("--freeze_encoder" , action="store_true" )
parser.add_argument("--freeze_embeds" , action="store_true" )
parser.add_argument("--sortish_sampler" , action="store_true" , default=lowercase_ )
parser.add_argument("--overwrite_output_dir" , action="store_true" , default=lowercase_ )
parser.add_argument("--max_tokens_per_batch" , type=lowercase_ , default=lowercase_ )
parser.add_argument("--logger_name" , type=lowercase_ , choices=["default", "wandb", "wandb_shared"] , default="default" )
parser.add_argument("--n_train" , type=lowercase_ , default=-1 , required=lowercase_ , help="# examples. -1 means use all." )
parser.add_argument("--n_val" , type=lowercase_ , default=500 , required=lowercase_ , help="# examples. -1 means use all." )
parser.add_argument("--n_test" , type=lowercase_ , default=-1 , required=lowercase_ , help="# examples. -1 means use all." )
        parser.add_argument(
            "--task" , type=lowercase_ , default="summarization" , required=lowercase_ , help="Task to fine-tune on: summarization or translation." )
parser.add_argument("--label_smoothing" , type=lowercase_ , default=0.0 , required=lowercase_ )
parser.add_argument("--src_lang" , type=lowercase_ , default="" , required=lowercase_ )
parser.add_argument("--tgt_lang" , type=lowercase_ , default="" , required=lowercase_ )
parser.add_argument("--eval_beams" , type=lowercase_ , default=lowercase_ , required=lowercase_ )
parser.add_argument(
"--val_metric" , type=lowercase_ , default=lowercase_ , required=lowercase_ , choices=["bleu", "rouge2", "loss", None] )
parser.add_argument("--eval_max_gen_length" , type=lowercase_ , default=lowercase_ , help="never generate more than n tokens" )
parser.add_argument("--save_top_k" , type=lowercase_ , default=1 , required=lowercase_ , help="How many checkpoints to save" )
        parser.add_argument(
            "--early_stopping_patience" , type=lowercase_ , default=-1 , required=lowercase_ , help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will affect it."
            ) , )
return parser
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'translation'
_lowerCamelCase = ['loss']
_lowerCamelCase = ['bleu']
_lowerCamelCase = 'bleu'
def __init__( self , lowercase_ , **lowercase_ ):
super().__init__(lowercase_ , **lowercase_ )
_snake_case : Any = hparams.src_lang
_snake_case : Union[str, Any] = hparams.tgt_lang
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
return calculate_bleu(lowercase_ , lowercase_ )
def snake_case (__lowercase , __lowercase=None ) -> SummarizationModule:
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=__lowercase )
check_output_dir(__lowercase , expected_items=3 )
if model is None:
if "summarization" in args.task:
_snake_case : SummarizationModule = SummarizationModule(__lowercase )
else:
_snake_case : SummarizationModule = TranslationModule(__lowercase )
_snake_case : List[Any] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("/tmp" )
or str(args.output_dir ).startswith("/var" )
):
_snake_case : List[str] = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
_snake_case : str = os.environ.get("WANDB_PROJECT" , __lowercase )
_snake_case : Any = WandbLogger(name=model.output_dir.name , project=__lowercase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
_snake_case : Dict = WandbLogger(name=model.output_dir.name , project=F"""hf_{dataset}""" )
if args.early_stopping_patience >= 0:
_snake_case : List[str] = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
_snake_case : Optional[int] = False
_snake_case : Optional[Any] = args.val_metric == "loss"
_snake_case : pl.Trainer = generic_train(
__lowercase , __lowercase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , __lowercase ) , early_stopping_callback=__lowercase , logger=__lowercase , )
pickle_save(model.hparams , model.output_dir / "hparams.pkl" )
if not args.do_predict:
return model
    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    main(args)
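    # Example invocation (hypothetical paths/values; several flags such as
    # --model_name_or_path come from add_generic_args/BaseTransformer, which are
    # defined outside this file):
    #   python finetune.py --data_dir ./cnn_dm --output_dir ./out \
    #       --max_source_length 1024 --max_target_length 56 --gpus 1 --n_val 500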
| 580
| 1
|
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 247
|
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCamelCase = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generates a tuple of dummy DataLoaders fitting y = a * x + b plus noise."""
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))
    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Trains for `num_epochs`, returning the random values drawn each step."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Simple model computing y = a * x + b."""
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))
    def forward(self, x):
        return x * self.a + self.b
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(total_limit=1 , project_dir=lowerCAmelCase__ , automatic_checkpoint_naming=lowerCAmelCase__ )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __A ( self ) -> int:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dummy_dataloaders()
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save initial
SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase__ , 'initial' )
accelerator.save_state(lowerCAmelCase__ )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
SCREAMING_SNAKE_CASE = train(3 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = Accelerator()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
accelerator.load_state(lowerCAmelCase__ )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = train(2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save everything
SCREAMING_SNAKE_CASE = os.path.join(lowerCAmelCase__ , 'checkpoint' )
accelerator.save_state(lowerCAmelCase__ )
# Load everything back in and make sure all states work
accelerator.load_state(lowerCAmelCase__ )
test_rands += train(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __A ( self ) -> str:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase__ )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_dir=lowerCAmelCase__ , project_config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save initial
accelerator.save_state()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
SCREAMING_SNAKE_CASE = train(3 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
# Train partially
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = Accelerator(project_dir=lowerCAmelCase__ , project_config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
accelerator.load_state(os.path.join(lowerCAmelCase__ , 'checkpoints' , 'checkpoint_0' ) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = train(2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCAmelCase__ , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = torch.tensor([1, 2, 3] )
SCREAMING_SNAKE_CASE = torch.tensor([2, 3, 4] )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(net.parameters() )
SCREAMING_SNAKE_CASE = Accelerator()
with self.assertRaises(lowerCAmelCase__ ) as ve:
accelerator.register_for_checkpointing(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def __A ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE = torch.optim.lr_scheduler.StepLR(lowerCAmelCase__ , step_size=1 , gamma=0.99 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase__ )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_dir=lowerCAmelCase__ , project_config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save initial
accelerator.save_state()
SCREAMING_SNAKE_CASE = scheduler.state_dict()
train(3 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotEqual(lowerCAmelCase__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCAmelCase__ , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(lowerCAmelCase__ , scheduler.state_dict() )
def __A ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=lowerCAmelCase__ , total_limit=2 )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_dir=lowerCAmelCase__ , project_config=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = accelerator.prepare(lowerCAmelCase__ )
            # Save 11 states; with total_limit=2 only the two most recent survive:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(lowerCAmelCase__ , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase__ , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def __A ( self ) -> Dict:
SCREAMING_SNAKE_CASE = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(lowerCAmelCase__ , env=os.environ.copy() )
if __name__ == "__main__":
savedir = "/tmp/accelerate/state_checkpointing"
model = DummyModel()
optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
train_dataloader, valid_dataloader = dummy_dataloaders()
project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
    if os.path.exists(savedir):
        shutil.rmtree(savedir)
    os.makedirs(savedir)
model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
    model, optimizer, train_dataloader, valid_dataloader, scheduler
)
model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
    param_device = group["params"][0].device
    break
assert param_device.type == accelerator.device.type
model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''cpu''')
for group in optimizer.param_groups:
    param_device = group["params"][0].device
break
assert (
param_device.type == torch.device('''cpu''').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''on_device''')
for group in optimizer.param_groups:
    param_device = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='''Unsupported optimizer map location passed'''):
accelerator.load_state(os.path.join(savedir, '''checkpoints''', '''checkpoint_0'''), map_location='''invalid''')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 247
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_layoutlmv3""": [
"""LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LayoutLMv3Config""",
"""LayoutLMv3OnnxConfig""",
],
"""processing_layoutlmv3""": ["""LayoutLMv3Processor"""],
"""tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""LayoutLMv3TokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_layoutlmv3"""] = [
"""LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv3ForQuestionAnswering""",
"""LayoutLMv3ForSequenceClassification""",
"""LayoutLMv3ForTokenClassification""",
"""LayoutLMv3Model""",
"""LayoutLMv3PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_layoutlmv3"""] = [
"""TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLayoutLMv3ForQuestionAnswering""",
"""TFLayoutLMv3ForSequenceClassification""",
"""TFLayoutLMv3ForTokenClassification""",
"""TFLayoutLMv3Model""",
"""TFLayoutLMv3PreTrainedModel""",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""LayoutLMv3FeatureExtractor"""]
__snake_case = ["""LayoutLMv3ImageProcessor"""]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 400
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__snake_case = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ["""DeiTFeatureExtractor"""]
__snake_case = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_deit"""] = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_deit"""] = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 400
| 1
|
'''simple docstring'''
def interpolation_search(sorted_collection, item):
    """Search `item` in an ascending `sorted_collection`; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
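# Worked probe (illustrative): searching for 67 in [10, 30, 40, 45, 50, 66, 77, 93]
# gives point = 0 + (67 - 10) * (7 - 0) // (93 - 10) = 399 // 83 = 4 on the first
# iteration, i.e. the value 50; since 50 < 67, the search continues with left = 5.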
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant: search `item` within indices [left, right]."""
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    """Raise ValueError if `collection` is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError('''Collection must be ascending sorted''')
    return True
if __name__ == "__main__":
    import sys
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit('Sequence must be ascending sorted to apply interpolation search')
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f'''{target} found at position: {result}''')
    else:
        print('Not found')
| 22
|
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
    with FileLock('''.lock''') as lock:
        nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split `x` into sentences, one per line (used when computing rougeLsum)."""
    x = re.sub('<n>', '', x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
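# Usage sketch (illustrative input): each detected sentence ends up on its own line.
if __name__ == "__main__":
    print(add_newline_to_end_of_each_sentence("First sentence. Second one."))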
| 186
| 0
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
lowercase = PegasusConfig
lowercase = {}
lowercase = '''gelu'''
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase=0.1 , lowercase=0.1 , lowercase=40 , lowercase=2 , lowercase=1 , lowercase=0 , ) -> List[str]:
_a : Union[str, Any] = parent
_a : Dict = batch_size
_a : Dict = seq_length
_a : int = is_training
_a : List[Any] = use_labels
_a : Union[str, Any] = vocab_size
_a : Tuple = hidden_size
_a : List[str] = num_hidden_layers
_a : Tuple = num_attention_heads
_a : List[str] = intermediate_size
_a : Any = hidden_dropout_prob
_a : Optional[Any] = attention_probs_dropout_prob
_a : Dict = max_position_embeddings
_a : Optional[int] = eos_token_id
_a : Union[str, Any] = pad_token_id
_a : int = bos_token_id
def snake_case__( self ) -> str:
_a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_a : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_a : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : Union[str, Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_a : Tuple = prepare_pegasus_inputs_dict(lowercase , lowercase , lowercase )
return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict['''input_ids''']
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['''attention_mask'''][:1, :]
        head_mask = inputs_dict['''head_mask''']
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
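
# Usage sketch (added for illustration, not part of the original test file): the same
# batch-summarization flow the slow test above checks, as plain inference code. The
# checkpoint name comes from the test; the input sentence is an arbitrary stand-in.
if __name__ == "__main__":
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

    tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
    model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
    batch = tokenizer(
        ["PG&E scheduled the blackouts in response to forecasts for high winds amid dry conditions."],
        padding=True,
        return_tensors="tf",
    )
    summary_ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
    print(tokenizer.batch_decode(summary_ids.numpy(), skip_special_tokens=True))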
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        # never ask for more classes than the model defines
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
"""simple docstring"""
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}
"""

_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
"""

_KWARGS_DESCRIPTION = """
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
    predictions: list of predictions to score. Depending on the SuperGlUE subset:
        - for 'record': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'prediction_text': the predicted answer text
        - for 'multirc': list of question-answer dictionaries with the following keys:
            - 'idx': index of the question-answer pair as specified by the dataset
            - 'prediction': the predicted answer label
        - otherwise: list of predicted labels
    references: list of reference labels. Depending on the SuperGLUE subset:
        - for 'record': list of question-answers dictionaries with the following keys:
            - 'idx': index of the question as specified by the dataset
            - 'answers': list of possible answers
        - otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
    - for 'record':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1': F1 score
    - for 'multirc':
        - 'exact_match': Exact match between answer and gold answer
        - 'f1_m': Per-question macro-F1 score
        - 'f1_a': Average F1 score over all answers
    - for 'axb':
        'matthews_correlation': Matthew Correlation
    - for 'cb':
        - 'accuracy': Accuracy
        - 'f1': F1 score
    - for all others:
        - 'accuracy': Accuracy
Examples:

    >>> super_glue_metric = datasets.load_metric('super_glue', 'copa')  # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')
    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'record')
    >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]
    >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')
    >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]
    >>> references = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}

    >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = super_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
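
# Worked example (added, illustrative): with preds = [1, 0, 1, 1] and labels = [1, 0, 0, 1],
# accuracy is 3/4 = 0.75 and the positive-class F1 is 0.8 (precision 2/3, recall 1.0):
#     >>> import numpy as np
#     >>> acc_and_f1(np.array([1, 0, 1, 1]), np.array([1, 0, 0, 1]))
#     {'accuracy': 0.75, 'f1': 0.8}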
def evaluate_multirc(ids_preds, labels):
    """Computes F1 over all answer pairs and exact-match / macro-F1 per question."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)

    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
{
'''qas''': [
{'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
for ref in references
]
}
]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTests(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
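
# Usage sketch (added): the push/load round trip the tests above exercise, outside
# unittest. The repo name and "your-username" are illustrative placeholders; pushing
# requires prior authentication (e.g. `huggingface-cli login`).
if __name__ == "__main__":
    processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
    processor.push_to_hub("my-image-processor")
    reloaded = ViTImageProcessor.from_pretrained("your-username/my-image-processor")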
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return T5Config(vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id)

    def get_config(self):
        return T5Config(vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id)
    def create_and_check_model(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(self, config, input_dict):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )
    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
'No se como puedo <extra_id_0>.',
'This is the reason why we <extra_id_0> them.',
'The <extra_id_0> walks in <extra_id_1>, seats',
'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
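
# Usage sketch (added): plain span-filling inference with the small UMT5 checkpoint the
# integration test above targets. Requires torch and sentencepiece; the prompt is arbitrary.
if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
    model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
    inputs = tokenizer("A <extra_id_0> walks into a bar.", return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=20)
    print(tokenizer.batch_decode(outputs, skip_special_tokens=True))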
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
SCREAMING_SNAKE_CASE__ : str = 25_60_47
SCREAMING_SNAKE_CASE__ : Optional[int] = 25_61_45
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    # overwrite from test_tokenization_common to speed up the test
    def test_save_pretrained(self):
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks that it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks that it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks that it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)

                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt")
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)

                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
UpperCAmelCase__ : int = [AddedToken("""<special>""" , lstrip=_lowerCAmelCase )]
UpperCAmelCase__ : Any = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase__ : str = tokenizer_r.encode("""Hey this is a <special> token""" )
UpperCAmelCase__ : Optional[Any] = tokenizer_r.encode("""<special>""" , add_special_tokens=_lowerCAmelCase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
UpperCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , **_lowerCAmelCase , )
UpperCAmelCase__ : Any = self.tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = tokenizer_p.encode("""Hey this is a <special> token""" )
UpperCAmelCase__ : Dict = tokenizer_cr.encode("""Hey this is a <special> token""" )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
    tgt_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
    expected_src_tokens = [
256_047,
16_297,
134_408,
8_165,
248_066,
14_734,
950,
1_135,
105_721,
3_573,
83,
27_352,
108,
49_486,
2,
]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels,
            self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 256057,
            },
        )
    @require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047]
        )

        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
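
# Usage sketch (added): English -> Romanian translation with the checkpoint the
# integration tests above target. The model import and generate parameters are
# standard NLLB usage, not part of the original test file.
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM

    tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
    model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
    inputs = tokenizer("UN Chief says there is no military solution in Syria", return_tensors="pt")
    out = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["ron_Latn"], max_length=40)
    print(tokenizer.batch_decode(out, skip_special_tokens=True)[0])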
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def tearDown( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_kandinsky_inpaint( self ) -> Dict:
    expected_image = load_numpy(
        'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
        '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
    init_image = load_image(
        'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
    mask = np.ones((768, 768) , dtype=np.float32 )
    mask[250:500, 250:500] = 0
    prompt = 'a hat'
    pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 )
    pipe_prior.to(torch_device )
    pipeline = KandinskyV22InpaintPipeline.from_pretrained(
        'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.float16 )
    pipeline = pipeline.to(torch_device )
    pipeline.set_progress_bar_config(disable=None )
    generator = torch.Generator(device='cpu' ).manual_seed(0 )
    image_emb , zero_image_emb = pipe_prior(
        prompt , generator=generator , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
    output = pipeline(
        image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
    image = output.images[0]
    assert image.shape == (768, 768, 3)
    assert_mean_pixel_difference(image , expected_image )
| 561
| 0
|
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class _lowercase ( ProcessorMixin ):
tokenizer_class = '''AutoTokenizer'''
attributes = ['''tokenizer''']
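# preset_shape maps each voice-preset component to its expected ndarray rank
# (the semantic prompt is 1-D, the coarse and fine prompts are 2-D)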
preset_shape = {
'''semantic_prompt''': 1,
'''coarse_prompt''': 2,
'''fine_prompt''': 2,
}
def __init__( self , tokenizer , speaker_embeddings=None ) -> Union[str, Any]:
    """simple docstring"""
    super().__init__(tokenizer )
    self.speaker_embeddings = speaker_embeddings
@classmethod
def A ( cls , pretrained_processor_name_or_path , speaker_embeddings_dict_path="speaker_embeddings_path.json" , **kwargs ) -> str:
    """simple docstring"""
    if speaker_embeddings_dict_path is not None:
        speaker_embeddings_path = get_file_from_repo(
            pretrained_processor_name_or_path , speaker_embeddings_dict_path , subfolder=kwargs.pop("subfolder" , None ) , cache_dir=kwargs.pop("cache_dir" , None ) , force_download=kwargs.pop("force_download" , None ) , proxies=kwargs.pop("proxies" , None ) , resume_download=kwargs.pop("resume_download" , None ) , local_files_only=kwargs.pop("local_files_only" , None ) , use_auth_token=kwargs.pop("use_auth_token" , None ) , revision=kwargs.pop("revision" , None ) , )
        if speaker_embeddings_path is None:
            logger.warning(
                f"""`{os.path.join(pretrained_processor_name_or_path , speaker_embeddings_dict_path )}` does not exist
                , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
            speaker_embeddings = None
        else:
            with open(speaker_embeddings_path ) as speaker_embeddings_json:
                speaker_embeddings = json.load(speaker_embeddings_json )
    else:
        speaker_embeddings = None
    tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path , **kwargs )
    return cls(tokenizer=tokenizer , speaker_embeddings=speaker_embeddings )
def A ( self , save_directory , speaker_embeddings_dict_path="speaker_embeddings_path.json" , speaker_embeddings_directory="speaker_embeddings" , push_to_hub = False , **kwargs , ) -> Optional[Any]:
    """simple docstring"""
    if self.speaker_embeddings is not None:
        os.makedirs(os.path.join(save_directory , speaker_embeddings_directory , "v2" ) , exist_ok=True )
        embeddings_dict = {}
        embeddings_dict["repo_or_path"] = save_directory
        for prompt_key in self.speaker_embeddings:
            if prompt_key != "repo_or_path":
                voice_preset = self._load_voice_preset(prompt_key )
                tmp_dict = {}
                for key in self.speaker_embeddings[prompt_key]:
                    np.save(
                        os.path.join(
                            embeddings_dict["repo_or_path"] , speaker_embeddings_directory , f"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=False , )
                    tmp_dict[key] = os.path.join(speaker_embeddings_directory , f"""{prompt_key}_{key}.npy""" )
                embeddings_dict[prompt_key] = tmp_dict
        with open(os.path.join(save_directory , speaker_embeddings_dict_path ) , "w" ) as fp:
            json.dump(embeddings_dict , fp )
    super().save_pretrained(save_directory , push_to_hub , **kwargs )
def A ( self , voice_preset = None , **kwargs ) -> Optional[int]:
    """simple docstring"""
    voice_preset_paths = self.speaker_embeddings[voice_preset]
    voice_preset_dict = {}
    for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
        if key not in voice_preset_paths:
            raise ValueError(
                f"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
        path = get_file_from_repo(
            self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , None ) , cache_dir=kwargs.pop("cache_dir" , None ) , force_download=kwargs.pop("force_download" , None ) , proxies=kwargs.pop("proxies" , None ) , resume_download=kwargs.pop("resume_download" , None ) , local_files_only=kwargs.pop("local_files_only" , None ) , use_auth_token=kwargs.pop("use_auth_token" , None ) , revision=kwargs.pop("revision" , None ) , )
        if path is None:
            raise ValueError(
                f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist
                , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                embeddings.""" )
        voice_preset_dict[key] = np.load(path )
    return voice_preset_dict
def A ( self , voice_preset = None , **kwargs ) -> Dict:
"""simple docstring"""
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self , text=None , voice_preset=None , return_tensors="pt" , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , **kwargs , ) -> Tuple:
    """simple docstring"""
    if voice_preset is not None and not isinstance(voice_preset , dict ):
        if (
            isinstance(voice_preset , str )
            and self.speaker_embeddings is not None
            and voice_preset in self.speaker_embeddings
        ):
            voice_preset = self._load_voice_preset(voice_preset )
        else:
            if isinstance(voice_preset , str ) and not voice_preset.endswith(".npz" ):
                voice_preset = voice_preset + ".npz"
            voice_preset = np.load(voice_preset )
    if voice_preset is not None:
        self._validate_voice_preset_dict(voice_preset , **kwargs )
        voice_preset = BatchFeature(data=voice_preset , tensor_type=return_tensors )
    encoded_text = self.tokenizer(
        text , return_tensors=return_tensors , padding="max_length" , max_length=max_length , return_attention_mask=return_attention_mask , return_token_type_ids=return_token_type_ids , add_special_tokens=add_special_tokens , **kwargs , )
    if voice_preset is not None:
        encoded_text["history_prompt"] = voice_preset
    return encoded_text
| 706
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : str = {
'''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}
class _lowercase ( BackboneConfigMixin, PretrainedConfig ):
model_type = '''focalnet'''
def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , use_conv_embed=False , hidden_sizes=[192, 384, 768, 768] , depths=[2, 2, 6, 2] , focal_levels=[2, 2, 2, 2] , focal_windows=[3, 3, 3, 3] , hidden_act="gelu" , mlp_ratio=4.0 , hidden_dropout_prob=0.0 , drop_path_rate=0.1 , use_layerscale=False , layerscale_value=1E-4 , use_post_layernorm=False , use_post_layernorm_in_modulation=False , normalize_modulator=False , initializer_range=0.0_2 , layer_norm_eps=1E-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ) -> List[str]:
    """simple docstring"""
    super().__init__(**kwargs )
    self.image_size = image_size
    self.patch_size = patch_size
    self.num_channels = num_channels
    self.embed_dim = embed_dim
    self.use_conv_embed = use_conv_embed
    self.hidden_sizes = hidden_sizes
    self.depths = depths
    self.focal_levels = focal_levels
    self.focal_windows = focal_windows
    self.hidden_act = hidden_act
    self.mlp_ratio = mlp_ratio
    self.hidden_dropout_prob = hidden_dropout_prob
    self.drop_path_rate = drop_path_rate
    self.use_layerscale = use_layerscale
    self.layerscale_value = layerscale_value
    self.use_post_layernorm = use_post_layernorm
    self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
    self.normalize_modulator = normalize_modulator
    self.initializer_range = initializer_range
    self.layer_norm_eps = layer_norm_eps
    self.encoder_stride = encoder_stride
    # backbone bookkeeping: a "stem" stage followed by one named stage per depth entry
    self.stage_names = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
    self._out_features , self._out_indices = get_aligned_output_features_output_indices(
        out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 32
| 0
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester( unittest.TestCase ):
'''simple docstring'''
def __init__( self , parent , vocab_size=100 , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , ) -> Any:
    self.parent = parent
    self.vocab_size = vocab_size
    self.batch_size = batch_size
    self.image_size = image_size
    self.patch_size = patch_size
    self.num_channels = num_channels
    self.is_training = is_training
    self.use_labels = use_labels
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.type_sequence_label_size = type_sequence_label_size
    self.initializer_range = initializer_range
    # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
    num_patches = (image_size // patch_size) ** 2
    self.seq_length = num_patches + 1
def prepare_config_and_inputs( self ) -> Union[str, Any]:
    pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
    labels = None
    if self.use_labels:
        labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
    config = BeitConfig(
        vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
    return config, pixel_values, labels
def create_and_check_model( self , config , pixel_values , labels ) -> Optional[int]:
    model = FlaxBeitModel(config=config )
    result = model(pixel_values )
    self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_masked_lm( self , config , pixel_values , labels ) -> List[Any]:
    model = FlaxBeitForMaskedImageModeling(config=config )
    result = model(pixel_values )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> int:
    config.num_labels = self.type_sequence_label_size
    model = FlaxBeitForImageClassification(config=config )
    result = model(pixel_values )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    # test greyscale images
    config.num_channels = 1
    model = FlaxBeitForImageClassification(config )
    pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
    result = model(pixel_values )
def prepare_config_and_inputs_for_common( self ) -> Optional[int]:
    config_and_inputs = self.prepare_config_and_inputs()
    config , pixel_values , labels = config_and_inputs
    inputs_dict = {"""pixel_values""": pixel_values}
    return config, inputs_dict
@require_flax
class a_ ( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def setUp( self ) -> None:
    self.model_tester = FlaxBeitModelTester(self )
    self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=37 )
def test_config( self ) -> Any:
    self.config_tester.run_common_tests()
def test_forward_signature( self ) -> Tuple:
    config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        model = model_class(config )
        signature = inspect.signature(model.__call__ )
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["""pixel_values"""]
        self.assertListEqual(arg_names[:1] , expected_arg_names )
def test_jit_compilation( self ) -> List[str]:
    config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        with self.subTest(model_class.__name__ ):
            prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            model = model_class(config )
            @jax.jit
            def model_jitted(pixel_values , **kwargs ):
                return model(pixel_values=pixel_values , **kwargs )
            with self.subTest("""JIT Enabled""" ):
                jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
            with self.subTest("""JIT Disabled""" ):
                with jax.disable_jit():
                    outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
            self.assertEqual(len(outputs ) , len(jitted_outputs ) )
            for jitted_output, output in zip(jitted_outputs , outputs ):
                self.assertEqual(jitted_output.shape , output.shape )
def test_model( self ) -> Any:
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_masked_lm( self ) -> Any:
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
def test_for_image_classification( self ) -> Union[str, Any]:
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def test_model_from_pretrained( self ) -> Union[str, Any]:
    for model_class_name in self.all_model_classes:
        model = model_class_name.from_pretrained("""microsoft/beit-base-patch16-224""" )
        outputs = model(np.ones((1, 3, 224, 224) ) )
        self.assertIsNotNone(outputs )
def prepare_img( ) ->Union[str, Any]:
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_vision
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def default_image_processor( self ) -> Tuple:
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def test_inference_masked_image_modeling_head( self ) -> str:
    model = FlaxBeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" )
    image_processor = self.default_image_processor
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="""np""" ).pixel_values
    # prepare bool_masked_pos
    bool_masked_pos = np.ones((1, 196) , dtype=bool )
    # forward pass
    outputs = model(pixel_values=pixel_values , bool_masked_pos=bool_masked_pos )
    logits = outputs.logits
    # verify the logits
    expected_shape = (1, 196, 8192)
    self.assertEqual(logits.shape , expected_shape )
    expected_slice = np.array(
        [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
    self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , expected_slice , atol=1e-2 ) )
@slow
def test_inference_image_classification_head_imagenet_1k( self ) -> str:
    model = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" )
    image_processor = self.default_image_processor
    image = prepare_img()
    inputs = image_processor(images=image , return_tensors="""np""" )
    # forward pass
    outputs = model(**inputs )
    logits = outputs.logits
    # verify the logits
    expected_shape = (1, 1000)
    self.assertEqual(logits.shape , expected_shape )
    expected_slice = np.array([-1.2385, -1.0987, -1.0108] )
    self.assertTrue(np.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
    expected_class_idx = 281
    self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
@slow
def test_inference_image_classification_head_imagenet_22k( self ) -> Union[str, Any]:
    model = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" )
    image_processor = self.default_image_processor
    image = prepare_img()
    inputs = image_processor(images=image , return_tensors="""np""" )
    # forward pass
    outputs = model(**inputs )
    logits = outputs.logits
    # verify the logits
    expected_shape = (1, 2_1841)
    self.assertEqual(logits.shape , expected_shape )
    expected_slice = np.array([1.6881, -0.2787, 0.5901] )
    self.assertTrue(np.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
    expected_class_idx = 2396
    self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
| 314
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
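# lazy import structure: the torch-backed classes below are only imported on first attribute access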
lowercase_ = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_["modeling_nllb_moe"] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTop2Router,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], lowercase_, module_spec=__spec__)
| 314
| 1
|
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def _lowerCamelCase ( _a ):
"""simple docstring"""
return EnvironmentCommand()
def _lowerCamelCase ( args ):
    """simple docstring"""
    return EnvironmentCommand(args.accelerate_config_file )
class __magic_name__ ( BaseTransformersCLICommand ):
"""simple docstring"""
@staticmethod
def register_subcommand( parser ):
    download_parser = parser.add_parser('''env''' )
    download_parser.set_defaults(func=_lowerCamelCase )
    download_parser.add_argument(
        '''--accelerate-config_file''' , default=None , help='''The accelerate config file to use for the default values in the launching script.''' , )
    download_parser.set_defaults(func=_lowerCamelCase )
def __init__( self , accelerate_config_file , *args ):
    self._accelerate_config_file = accelerate_config_file
def run( self ):
    safetensors_version = '''not installed'''
    if is_safetensors_available():
        import safetensors
        safetensors_version = safetensors.__version__
    elif importlib.util.find_spec('''safetensors''' ) is not None:
        import safetensors
        safetensors_version = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
    accelerate_version = '''not installed'''
    accelerate_config = accelerate_config_str = '''not found'''
    if is_accelerate_available():
        import accelerate
        from accelerate.commands.config import default_config_file, load_config_from_file
        accelerate_version = accelerate.__version__
        # Get the default from the config file.
        if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
            accelerate_config = load_config_from_file(self._accelerate_config_file ).to_dict()
        accelerate_config_str = (
            '''\n'''.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
            if isinstance(accelerate_config , dict )
            else f'''\t{accelerate_config}'''
        )
    pt_version = '''not installed'''
    pt_cuda_available = '''NA'''
    if is_torch_available():
        import torch
        pt_version = torch.__version__
        pt_cuda_available = torch.cuda.is_available()
    tf_version = '''not installed'''
    tf_cuda_available = '''NA'''
    if is_tf_available():
        import tensorflow as tf
        tf_version = tf.__version__
        try:
            # deprecated in v2.1
            tf_cuda_available = tf.test.is_gpu_available()
        except AttributeError:
            # returns list of devices, convert to bool
            tf_cuda_available = bool(tf.config.list_physical_devices('''GPU''' ) )
    flax_version = '''not installed'''
    jax_version = '''not installed'''
    jaxlib_version = '''not installed'''
    jax_backend = '''NA'''
    if is_flax_available():
        import flax
        import jax
        import jaxlib
        flax_version = flax.__version__
        jax_version = jax.__version__
        jaxlib_version = jaxlib.__version__
        jax_backend = jax.lib.xla_bridge.get_backend().platform
    info = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': f'''{safetensors_version}''',
'''Accelerate version''': f'''{accelerate_version}''',
'''Accelerate config''': f'''{accelerate_config_str}''',
'''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''',
'''Tensorflow version (GPU?)''': f'''{tf_version} ({tf_cuda_available})''',
'''Flax version (CPU?/GPU?/TPU?)''': f'''{flax_version} ({jax_backend})''',
'''Jax version''': f'''{jax_version}''',
'''JaxLib version''': f'''{jaxlib_version}''',
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(info ) )
return info
@staticmethod
def format_dict ( d ):
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 717
|
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class __magic_name__ ( PretrainedConfig ):
"""simple docstring"""
_UpperCamelCase = "xlm-prophetnet"
_UpperCamelCase = ["past_key_values"]
_UpperCamelCase = {
"num_attention_heads": "num_encoder_attention_heads",
}
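# attribute_map exposes the shared encoder head count under the standard num_attention_heads name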
def __init__( self , activation_dropout = 0.1 , activation_function = "gelu" , vocab_size = 3_05_22 , hidden_size = 10_24 , encoder_ffn_dim = 40_96 , num_encoder_layers = 12 , num_encoder_attention_heads = 16 , decoder_ffn_dim = 40_96 , num_decoder_layers = 12 , num_decoder_attention_heads = 16 , attention_dropout = 0.1 , dropout = 0.1 , max_position_embeddings = 5_12 , init_std = 0.02 , is_encoder_decoder = True , add_cross_attention = True , decoder_start_token_id = 0 , ngram = 2 , num_buckets = 32 , relative_max_distance = 1_28 , disable_ngram_loss = False , eps = 0.0 , use_cache = True , pad_token_id = 0 , bos_token_id = 1 , eos_token_id = 2 , **kwargs , ):
    self.vocab_size = vocab_size
    self.hidden_size = hidden_size
    self.encoder_ffn_dim = encoder_ffn_dim
    self.num_encoder_layers = num_encoder_layers
    self.num_encoder_attention_heads = num_encoder_attention_heads
    self.decoder_ffn_dim = decoder_ffn_dim
    self.num_decoder_layers = num_decoder_layers
    self.num_decoder_attention_heads = num_decoder_attention_heads
    self.max_position_embeddings = max_position_embeddings
    self.init_std = init_std # Normal(0, this parameter)
    self.activation_function = activation_function
    # parameters for xlmprophetnet
    self.ngram = ngram
    self.num_buckets = num_buckets
    self.relative_max_distance = relative_max_distance
    self.disable_ngram_loss = disable_ngram_loss
    self.eps = eps
    # 3 Types of Dropout
    self.attention_dropout = attention_dropout
    self.activation_dropout = activation_dropout
    self.dropout = dropout
    self.use_cache = use_cache
    super().__init__(
        pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , add_cross_attention=add_cross_attention , decoder_start_token_id=decoder_start_token_id , **kwargs , )
@property
def num_hidden_layers( self ):
    return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def num_hidden_layers( self , value ):
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
''' `num_decoder_layers`.''' )
| 297
| 0
|
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env ( key , default=False ) -> Union[str, Any]:
    '''simple docstring'''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"""If set, {key} must be yes or no.""" )
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
_run_remote_tests = parse_flag_from_env('RUN_REMOTE', default=False)
_run_local_tests = parse_flag_from_env('RUN_LOCAL', default=True)
_run_packaged_tests = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
SCREAMING_SNAKE_CASE_: List[Any] =pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
SCREAMING_SNAKE_CASE_: int =pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
SCREAMING_SNAKE_CASE_: int =pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
SCREAMING_SNAKE_CASE_: Any =pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
SCREAMING_SNAKE_CASE_: Optional[int] =pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
SCREAMING_SNAKE_CASE_: Optional[int] =pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
SCREAMING_SNAKE_CASE_: List[str] =pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def lowerCAmelCase_ ( test_case : Optional[Any] ) -> Optional[int]:
    '''simple docstring'''
    try:
        import faiss # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss" )(test_case )
    return test_case
def lowerCAmelCase_ ( test_case : Tuple ) -> Union[str, Any]:
    '''simple docstring'''
    try:
        import regex # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex" )(test_case )
    return test_case
def lowerCAmelCase_ ( test_case : Optional[Any] ) -> Dict:
    '''simple docstring'''
    try:
        import elasticsearch # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch" )(test_case )
    return test_case
def lowerCAmelCase_ ( test_case : str ) -> Union[str, Any]:
    '''simple docstring'''
    try:
        import sqlalchemy # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy" )(test_case )
    return test_case
def lowerCAmelCase_ ( test_case : Tuple ) -> int:
    '''simple docstring'''
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch" )(test_case )
    return test_case
def lowerCAmelCase_ ( test_case : Optional[Any] ) -> Optional[int]:
    '''simple docstring'''
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow" )(test_case )
    return test_case
def lowerCAmelCase_ ( test_case : Any ) -> str:
    '''simple docstring'''
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX" )(test_case )
    return test_case
def lowerCAmelCase_ ( test_case : str ) -> List[str]:
    '''simple docstring'''
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow" )(test_case )
    return test_case
def lowerCAmelCase_ ( test_case : Optional[Any] ) -> Optional[Any]:
    '''simple docstring'''
    try:
        import transformers # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers" )(test_case )
    else:
        return test_case
def lowerCAmelCase_ ( test_case : Dict ) -> Any:
    '''simple docstring'''
    try:
        import tiktoken # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken" )(test_case )
    else:
        return test_case
def lowerCAmelCase_ ( test_case : List[Any] ) -> Dict:
    '''simple docstring'''
    try:
        import spacy # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy" )(test_case )
    else:
        return test_case
def lowerCAmelCase_ ( model : List[Any] ) -> Any:
    '''simple docstring'''
    def _require_spacy_model(test_case : Optional[int] ):
        try:
            import spacy # noqa F401
            spacy.load(model )
        except ImportError:
            return unittest.skip("test requires spacy" )(test_case )
        except OSError:
            return unittest.skip("test requires spacy model \'{}\'".format(model ) )(test_case )
        else:
            return test_case
    return _require_spacy_model
def lowerCAmelCase_ ( test_case : Dict ) -> Optional[int]:
    '''simple docstring'''
    try:
        import pyspark # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark" )(test_case )
    else:
        return test_case
def lowerCAmelCase_ ( test_case : Optional[Any] ) -> List[Any]:
    '''simple docstring'''
    try:
        import joblibspark # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark" )(test_case )
    else:
        return test_case
def lowerCAmelCase_ ( test_case : str ) -> Optional[Any]:
    '''simple docstring'''
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow" )(test_case )
    return test_case
def lowerCAmelCase_ ( test_case : Tuple ) -> str:
    '''simple docstring'''
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local" )(test_case )
    return test_case
def lowerCAmelCase_ ( test_case : Any ) -> str:
    '''simple docstring'''
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged" )(test_case )
    return test_case
def lowerCAmelCase_ ( test_case : str ) -> int:
    '''simple docstring'''
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote" )(test_case )
    return test_case
def lowerCAmelCase_ ( *decorators : Any ) -> Dict:
    '''simple docstring'''
    def decorate(cls : Dict ):
        for name, fn in cls.__dict__.items():
            if callable(fn ) and name.startswith("test" ):
                for decorator in decorators:
                    fn = decorator(fn )
                setattr(cls , name , fn )
        return cls
    return decorate
class RequestWouldHangIndefinitelyError( Exception ):
    pass
class OfflineSimulationMode( Enum ):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def lowerCAmelCase_ ( mode=OfflineSimulationMode.CONNECTION_FAILS , timeout=1E-1_6 ) -> int:
    '''simple docstring'''
    online_request = requests.Session().request
    def timeout_request(session : Optional[int] , method : Tuple , url : List[str] , **kwargs : Optional[int] ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = 'https://10.255.255.1'
        if kwargs.get("timeout" ) is None:
            raise RequestWouldHangIndefinitelyError(
                f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
        kwargs["timeout"] = timeout
        try:
            return online_request(method , invalid_url , **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1" , f"""OfflineMock[{url}]""" ),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session : Tuple , prepared_request : str , **kwargs : int ):
        raise requests.ConnectionError("Offline mode is enabled." , request=prepared_request )
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send" , raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request" , timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE" , True ):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum." )
@contextmanager
def lowerCAmelCase_ ( *args : Optional[Any] , **kwargs : Union[str, Any] ) -> Optional[int]:
    '''simple docstring'''
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args , **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
@contextmanager
def lowerCAmelCase_ ( ) -> Dict:
'''simple docstring'''
import gc
gc.collect()
previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def lowerCAmelCase_ ( ) -> Any:
'''simple docstring'''
import gc
gc.collect()
previous_allocated_memory = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowerCAmelCase_ ( rng1 : Tuple , rng2 : Dict ) -> Tuple:
    '''simple docstring'''
    return deepcopy(rng1 ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(rng2 ).integers(0 , 1_00 , 10 ).tolist()
def lowerCAmelCase_ ( func : Any ) -> Optional[int]:
    '''simple docstring'''
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func : Optional[int] , *args : List[Any] , **kwargs : List[Any] ):
        try:
            return func(*args , **kwargs )
        except HTTPError as err:
            if str(err ).startswith("500" ) or str(err ).startswith("502" ):
                pytest.xfail(str(err ) )
            raise err
    return decorator.decorator(_wrapper , func )
class _RunOutput :
    def __init__(self , returncode : Tuple , stdout : Optional[Any] , stderr : List[Any] ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream ( stream : List[Any] , callback : Any ) -> Union[str, Any]:
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess ( cmd : str , env : str=None , stdin : int=None , timeout : str=None , quiet : Any=False , echo : Optional[int]=False ) -> _RunOutput:
    '''simple docstring'''
    if echo:
        print("\nRunning: " , " ".join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line : str , sink : Union[str, Any] , pipe : Optional[int] , label : Union[str, Any]="" ):
        line = line.decode("utf-8" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda line : tee(line , out , sys.stdout , label="stdout:" ) ),
            _read_stream(p.stderr , lambda line : tee(line , err , sys.stderr , label="stderr:" ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def lowerCAmelCase_ ( cmd : List[str] , env : Tuple=None , stdin : Optional[Any]=None , timeout : List[Any]=1_80 , quiet : Tuple=False , echo : Optional[int]=True ) -> _RunOutput:
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ' '.join(cmd )
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr )
        raise RuntimeError(
            f"""\'{cmd_str}\' failed with returncode {result.returncode}\n\n"""
            f"""The combined stderr from workers follows:\n{stderr}""" )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"""\'{cmd_str}\' produced no output.""" )
    return result
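# derive a unique integer from the pytest-xdist worker name, e.g. "gw3" -> 3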
def pytest_xdist_worker_id ( ) -> List[Any]:
    '''simple docstring'''
    worker = os.environ.get("PYTEST_XDIST_WORKER" , "gw0" )
    worker = re.sub(R"^gw" , "" , worker , 0 , re.M )
    return int(worker )
def lowerCAmelCase_ ( ) -> Optional[int]:
'''simple docstring'''
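# offset a base port by the xdist worker id so parallel torch.distributed tests don't collide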
port = 2_95_00
uniq_delta = pytest_xdist_worker_id()
return port + uniq_delta
| 78
|
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = 'base_with_context'
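# the three loaders below walk the T5X checkpoint tree: token/position embeddings,
# per-layer attention and MLP kernels, and the final layer norm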
def load_notes_encoder ( weights , model ) -> str:
"""simple docstring"""
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
snake_case: Tuple =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
ly_weight = weights[f'''layers_{lyr_num}''']
snake_case: str =nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
attention_weights = ly_weight['attention']
snake_case: Dict =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
snake_case: str =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
snake_case: Dict =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
snake_case: List[Any] =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
snake_case: Optional[int] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
snake_case: Union[str, Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
snake_case: Any =nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder ( weights , model ) -> Union[str, Any]:
"""simple docstring"""
snake_case: Union[str, Any] =nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
snake_case: Dict =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__UpperCAmelCase )
for lyr_num, lyr in enumerate(model.encoders ):
ly_weight = weights[f'''layers_{lyr_num}''']
attention_weights = ly_weight['attention']
snake_case: str =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
snake_case: Optional[int] =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
snake_case: int =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
snake_case: Union[str, Any] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
snake_case: Optional[Any] =nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
snake_case: Tuple =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
snake_case: Optional[int] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
snake_case: Any =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
snake_case: List[str] =nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder ( weights , model ) -> int:
"""simple docstring"""
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
snake_case: Dict =nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
snake_case: Tuple =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__UpperCAmelCase )
snake_case: Any =nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
ly_weight = weights[f'''layers_{lyr_num}''']
snake_case: Any =nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
snake_case: int =nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
attention_weights = ly_weight['self_attention']
snake_case: str =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
snake_case: Dict =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
snake_case: Dict =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
snake_case: List[str] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
attention_weights = ly_weight['MultiHeadDotProductAttention_0']
snake_case: int =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
snake_case: List[str] =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
snake_case: Dict =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
snake_case: Any =nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
snake_case: int =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
snake_case: Union[str, Any] =nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
snake_case: int =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
snake_case: Optional[int] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
snake_case: Union[str, Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
snake_case: int =nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main ( args ) -> Dict:
    """simple docstring"""
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
        'from __gin__ import dynamic_registration',
        'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
        'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
        'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
    ]
    gin_file = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint['target']['decoder'] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=F"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
args = parser.parse_args()
main(args)
| 350
| 0
|
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class a ( AbstractDatasetReader ):
def __init__( self , df , split = None , features = None , streaming = True , cache_dir = None , keep_in_memory = False , working_dir = None , load_from_cache_file = True , file_format = "arrow" , **kwargs , ):
    super().__init__(
        split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , **kwargs , )
    self._load_from_cache_file = load_from_cache_file
    self._file_format = file_format
    self.builder = Spark(
        df=df , features=features , cache_dir=cache_dir , working_dir=working_dir , **kwargs , )
def __snake_case ( self ):
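# streaming iterates the Spark DataFrame lazily; otherwise download and materialize
# the dataset to the configured file format first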
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
    download_mode=download_mode , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 254
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
},
'tokenizer_file': {
'unc-nlp/lxmert-base-uncased': (
'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'unc-nlp/lxmert-base-uncased': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class a ( PreTrainedTokenizerFast ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = LxmertTokenizer
def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
    super().__init__(
        vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
    normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
    # rebuild the backend normalizer when its serialized options differ from the requested ones
    if (
        normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
        or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
        or normalizer_state.get('handle_chinese_chars' , tokenize_chinese_chars ) != tokenize_chinese_chars
    ):
        normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
        normalizer_state['lowercase'] = do_lower_case
        normalizer_state['strip_accents'] = strip_accents
        normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
        self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
    self.do_lower_case = do_lower_case
def __snake_case ( self , token_ids_0 , token_ids_1=None ):
    output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    if token_ids_1:
        output += token_ids_1 + [self.sep_token_id]
    return output
def __snake_case ( self , token_ids_0 , token_ids_1 = None ):
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep ) * [0]
    return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def __snake_case ( self , save_directory , filename_prefix = None ):
    files = self._tokenizer.model.save(save_directory , name=filename_prefix )
    return tuple(files )
| 254
| 1
|
'''simple docstring'''
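# ideal gas law helpers; 0.0821 is the gas constant R in L*atm/(mol*K)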
def UpperCamelCase_ ( moles , volume , nfactor ):
    return round(float(moles / volume ) * nfactor )
def UpperCamelCase_ ( moles , volume , temperature ):
    return round(float((moles * 0.0821 * temperature) / (volume) ) )
def UpperCamelCase_ ( moles , pressure , temperature ):
    return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def UpperCamelCase_ ( pressure , volume , moles ):
    return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 263
|
'''simple docstring'''
import re
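# accepts an optional +91 / 91 / 0 prefix followed by a 10-digit number starting with 7, 8 or 9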
def indian_phone_validator ( phone ):
    pat = re.compile(r"""^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$""" )
    if match := re.search(pat , phone ):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
| 263
| 1
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _snake_case (TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
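
# The first regexp flags open(...) calls whose argument list mentions neither an
# explicit encoding nor a binary/write mode token; the second flags bare print(
# calls while ignoring occurrences inside comments and string literals.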
| 721
|
def __lowerCAmelCase(input_str):
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
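
# Example: "abc" turns on three distinct bits and returns True, while "aba"
# revisits the bit for "a" and returns False. Because the bitmap is an
# arbitrary-precision int, this works for any Unicode code point, not just ASCII.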
if __name__ == "__main__":
import doctest
doctest.testmod()
| 37
| 0
|
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
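
# Twin primes are prime pairs that differ by exactly 2 (3/5, 11/13, 17/19),
# so twin_prime(3) returns 5 while twin_prime(4) returns -1.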
if __name__ == "__main__":
import doctest
doctest.testmod()
| 693
|
from __future__ import annotations
arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect: list[float] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """simple docstring"""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_item: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """simple docstring"""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """simple docstring"""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
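
# next_greatest_element() is the O(n) variant: scanning right to left, it keeps
# a monotonically decreasing stack of still-useful candidates, so each element
# is pushed and popped at most once, versus the O(n^2) nested loops above.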
if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        " next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
| 693
| 1
|
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
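
# Note: this scrapes the "Cited by N" anchor from a Google Scholar result page;
# the gs_ri/gs_fl class names and the anchor index are scraping assumptions that
# break whenever Scholar changes its markup, and heavy use may be rate-limited.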
| 623
|
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.array) -> np.array:
    return vector * sigmoid(1.702 * vector)
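
# x * sigmoid(1.702 * x) is the sigmoid approximation of GELU (Hendrycks &
# Gimpel, 2016); with a slope of 1.0 instead of 1.702 it would be the standard
# SiLU/Swish activation.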
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623
| 1
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    """simple docstring"""

    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
@require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}])
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}])
@slow
@require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}])
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}])
        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2, )
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""" )
    def test_small_model_tf(self):
'''simple docstring'''
pass
| 102
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class lowerCamelCase (BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PIL.Image.BICUBIC, do_center_crop=True, crop_size=None, rescale_factor=1 / 255, do_rescale=True, do_normalize=True, image_mean=None, image_std=None, **kwargs, ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PIL.Image.BICUBIC, data_format=None, **kwargs, ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs, ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs, ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs, ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs, ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
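
# Minimal usage sketch (illustrative only, since the class name above is
# obfuscated; the defaults resize to 256x256, then center-crop to 224x224):
#   from PIL import Image
#   processor = <ThisImageProcessor>()
#   batch = processor(images=Image.open("cat.png"), return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224)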
| 499
| 0
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """simple docstring"""
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]
    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)
    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_8bit = False
    is_4bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2")
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.")
            else:
                new_value = torch.tensor(value, device="cpu")
            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T
            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)
            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)
        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
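
# Usage sketch (hypothetical tensor name, shown for illustration): move and
# quantize a single weight that was loaded on the meta device:
#   set_module_quantized_tensor_to_device(model, "transformer.h.0.mlp.c_fc.weight", "cuda:0", value=w)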
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    """simple docstring"""
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features
                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features, out_features, module.bias is not None, has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, threshold=quantization_config.llm_int8_threshold, )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features, out_features, module.bias is not None, quantization_config.bnb_4bit_compute_dtype, compress_statistics=quantization_config.bnb_4bit_use_double_quant, quant_type=quantization_config.bnb_4bit_quant_type, )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced, )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """simple docstring"""
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config)
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug.")
    return model
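
# Typical call site (sketch): model = replace_with_bnb_linear(model, quantization_config=bnb_config),
# where bnb_config is a transformers BitsAndBytesConfig describing 8-bit or 4-bit loading.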
def replace_8bit_linear(*args, **kwargs):
    """simple docstring"""
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead", FutureWarning, )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    """simple docstring"""
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead", FutureWarning, )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """simple docstring"""
    # Returns the module names (typically the output head plus tied-weight modules)
    # that should stay in full precision while the rest of the model is quantized.
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
| 149
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
"""simple docstring"""
    def __init__(self, parent, ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFEsmModel,
"""fill-mask""": TFEsmForMaskedLM,
"""text-classification""": TFEsmForSequenceClassification,
"""token-classification""": TFEsmForTokenClassification,
"""zero-shot""": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))
    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 149
| 1
|
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1, num2)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
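
# The two-stack method evaluates a fully parenthesized infix expression in one
# left-to-right pass: digits go to the operand stack, operators to the operator
# stack, and every ")" reduces one operator with the two topmost operands.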
| 20
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")
        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 20
| 1
|
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''),
('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''),
('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''),
('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''),
('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''),
('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''),
('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''),
('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''),
('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''),
(
'''zero-shot-object-detection''',
'''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''',
'''AutoModelForZeroShotObjectDetection''',
),
('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''),
('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''),
('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''),
('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''),
(
'''table-question-answering''',
'''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForTableQuestionAnswering''',
),
('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''),
('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''),
(
'''next-sentence-prediction''',
'''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''',
'''AutoModelForNextSentencePrediction''',
),
(
'''audio-frame-classification''',
'''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForAudioFrameClassification''',
),
('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''),
(
'''document-question-answering''',
'''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForDocumentQuestionAnswering''',
),
(
'''visual-question-answering''',
'''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForVisualQuestionAnswering''',
),
    ('''image-to-text''', '''MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''),
(
'''zero-shot-image-classification''',
'''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForZeroShotImageClassification''',
),
('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''),
('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''),
('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''),
]
def camel_case_split(identifier: str):
    '''simple docstring'''
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
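
# e.g. camel_case_split("TFEsmForMaskedLM") -> ["TF", "Esm", "For", "Masked", "LM"]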
def get_frameworks_table():
    '''simple docstring'''
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()
    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"
    data["processor"] = [processors[t] for t in all_models]
    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    '''simple docstring'''
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f'''TF_{model_mapping}''', f'''FLAX_{model_mapping}''']
        auto_classes = [auto_class, f'''TF_{auto_class}''', f'''Flax_{auto_class}''']
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token, commit_sha):
    '''simple docstring'''
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)
    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token)
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        })
    tags_dataset = Dataset.from_pandas(tags_table)
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))
        if commit_sha is not None:
            commit_message = (
                f'''Update with commit {commit_sha}\n\nSee: '''
                f'''https://github.com/huggingface/transformers/commit/{commit_sha}''')
        else:
            commit_message = "Update"
        upload_folder(
            repo_id="huggingface/transformers-metadata", folder_path=tmp_dir, repo_type="dataset", token=token, commit_message=commit_message, )
def check_pipeline_tags():
    '''simple docstring'''
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)
    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f'''`utils/update_metadata.py`: {msg}. Please add them!''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 39
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
logger = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ :
a : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
a : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
a : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
a : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
a : bool = field(
default=UpperCamelCase__ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
a : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a : bool = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class UpperCAmelCase__ :
a : Optional[str] = field(default=UpperCamelCase__ , metadata={"""help""": """The input training data file (a text file)."""} )
a : Optional[str] = field(
default=UpperCamelCase__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
a : bool = field(
default=UpperCamelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
a : Optional[int] = field(
default=UpperCamelCase__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
a : Optional[int] = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
a : bool = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
a : Optional[int] = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
a : Optional[int] = field(
default=UpperCamelCase__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class UpperCAmelCase__ :
a : PreTrainedTokenizerBase
a : Union[bool, str, PaddingStrategy] = True
a : Optional[int] = None
a : Optional[int] = None
    def __call__(self, features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
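
# The collator flattens each (batch, num_choices) example set so tokenizer.pad
# can treat all choices uniformly, then un-flattens the padded tensors back to
# (batch_size, num_choices, seq_len) and reattaches the labels.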
def __lowerCAmelCase ( ):
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}''')
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f"ending{i}" for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`.")
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
                f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.")
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False, )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
)
# Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
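
# A minimal sketch (not part of the script above; all names here are illustrative)
# of the flatten/unflatten trick run_swag relies on: the 4 candidate endings of
# each example are tokenized as one flat batch, then regrouped in blocks of 4.
from itertools import chain

def demo_flatten_unflatten():
    contexts = ["She opened the door", "He started the car"]
    endings = [
        ["and left.", "and sang.", "and slept.", "and ran."],
        ["and drove off.", "and stopped.", "and honked.", "and waited."],
    ]
    first = list(chain(*[[c] * 4 for c in contexts]))  # 8 flat contexts
    second = list(chain(*endings))  # 8 flat endings
    pairs = list(zip(first, second))  # what the tokenizer would see
    # regroup every 4 consecutive pairs into one multiple-choice example
    regrouped = [pairs[i : i + 4] for i in range(0, len(pairs), 4)]
    assert len(regrouped) == 2 and len(regrouped[0]) == 4
    return regrouped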
| 39
| 1
|
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
lowerCAmelCase : List[str] = """src/transformers"""
# Matches is_xxx_available()
lowerCAmelCase : List[str] = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase : str = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase : Optional[int] = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCAmelCase : Optional[int] = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase : str = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase : List[Any] = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase : Dict = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase : Tuple = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase : Optional[int] = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCAmelCase : Optional[int] = re.compile(R"""^\s*try:""")
# Catches a line with else:
lowerCAmelCase : Optional[int] = re.compile(R"""^\s*else:""")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1
    import_dict_objects = {"none": objects}
# Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1
    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]
    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers", os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
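
# Quick sanity check (hypothetical inputs, not part of the original script) for
# find_backend: a guarded backend test collapses to that backend's key, anything
# else maps to None.
if __name__ == "__main__":
    assert find_backend("if not is_torch_available():") == "torch"
    assert find_backend("from typing import TYPE_CHECKING") is None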
| 372
|
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, nicht wahr?",
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
"wmt16-en-de-dist-12-1": [28.3, 27.52],
"wmt16-en-de-dist-6-1": [27.4, 27.11],
"wmt16-en-de-12-1": [26.9, 25.75],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name)
| 372
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
{
"""conversational""": TFBlenderbotForConditionalGeneration,
"""feature-extraction""": TFBlenderbotModel,
"""summarization""": TFBlenderbotForConditionalGeneration,
"""text2text-generation""": TFBlenderbotForConditionalGeneration,
"""translation""": TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
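
# Hypothetical quick-start mirroring the slow test above (requires TF and the
# released weights; the checkpoint name comes from the test itself):
#     tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
#     model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill")
#     ids = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
#     print(tokenizer.batch_decode(model.generate(ids.input_ids), skip_special_tokens=True)[0])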
| 701
|
'''simple docstring'''
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Classic fourth-order Runge-Kutta integration of y' = f(x, y)."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
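
if __name__ == "__main__":
    # Worked example (assumed, not in the original file): integrate dy/dx = y from
    # x=0 to x=1 with step h=0.1; RK4 should land very close to e ≈ 2.7182818.
    ys = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
    print(ys[-1])  # ≈ 2.7182797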
| 347
| 0
|
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples (story, 1st continuation, 2nd continuation, label)
    into Transformer inputs of shape (n_batch, 2, input_len)."""
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps", default=-1, type=int, help=(
            "If > 0: set total number of training steps to perform. Override num_train_epochs."
        ), )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__snake_case = ["_start_", "_delimiter_", "_classify_"]
__snake_case = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_UpperCAmelCase )
__snake_case = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
__snake_case = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_UpperCAmelCase ) )
model.to(_UpperCAmelCase )
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)
# Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset)
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model
    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
# Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)
            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy
            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1
        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
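
# Hypothetical invocation (dataset paths assumed; the script expects the
# ROCStories cloze-test CSV files):
#     python run_openai_gpt.py --model_name openai-gpt --do_train --do_eval \
#         --train_dataset "$ROC_STORIES_DIR/cloze_test_val__spring2016.csv" \
#         --eval_dataset "$ROC_STORIES_DIR/cloze_test_test__spring2016.csv" \
#         --output_dir ../log --train_batch_size 16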
| 69
|
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot(lst):
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the kth smallest number in lst (elements assumed distinct)."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
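
if __name__ == "__main__":
    # Example usage (assumed): with distinct values, the 3rd smallest of
    # [2, 1, 3, 4, 5] is 3.
    print(kth_number([2, 1, 3, 4, 5], 3))  # 3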
| 90
| 0
|
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find unique prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors for a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check that all elements are equal (vacuously true for an empty list)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first group of n consecutive integers with n unique prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int | None:
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
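
# Worked examples (known Project Euler 47 values): solution(2) == 14, since
# 14 = 2 * 7 and 15 = 3 * 5 are the first consecutive pair with two distinct
# prime factors each; solution(3) == 644 (the run 644, 645, 646).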
| 693
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase : int =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
'microsoft/swin-tiny-patch4-window7-224': (
'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
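
# Minimal sanity sketch (assumed; requires transformers): with the defaults above,
# the derived channel dimension is embed_dim * 2 ** (num_stages - 1).
#     config = SwinConfig()  # embed_dim=96, depths=[2, 2, 6, 2]
#     assert config.hidden_size == 96 * 2 ** 3  # 768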
| 693
| 1
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_snake_case : str = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to Flax names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.")

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
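
# Quick check (assumed input) of rename_key: indexed module names are rewritten
# with underscores so they line up with Flax's flattened parameter tree.
#     rename_key("down_blocks.0.attentions.1.proj")  # -> "down_blocks_0.attentions_1.proj"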
| 53
|
"""simple docstring"""
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            "starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
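
if __name__ == "__main__":
    # Example usage (assumed): the first 15 terms of the classic game.
    print(fizz_buzz(1, 15))
    # -> "1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz "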
| 438
| 0
|
"""simple docstring"""
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
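
if __name__ == "__main__":
    # "Wikipedia" is the standard Adler-32 test vector: 0x11E60398 == 300286872.
    print(adler32("Wikipedia"))  # 300286872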
| 701
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    """Constructs a ChineseCLIP processor which wraps an image processor and a tokenizer."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class
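
# Hypothetical usage sketch (checkpoint name assumed; requires transformers + PIL):
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")  # "a photo of a cat"
#     # -> BatchEncoding with input_ids, attention_mask and pixel_values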
| 635
| 0
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
snake_case_ = logging.get_logger(__name__)
snake_case_ = {"""vocab_file""": """vocab.txt"""}
snake_case_ = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
snake_case_ = {
"""YituTech/conv-bert-base""": 512,
"""YituTech/conv-bert-medium-small""": 512,
"""YituTech/conv-bert-small""": 512,
}
snake_case_ = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
def lowercase__ (self : Optional[int], __UpperCAmelCase : Union[str, Any], __UpperCAmelCase : Optional[int]=None ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ (self : Any, __UpperCAmelCase : List[Any], __UpperCAmelCase : Union[str, Any] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ (self : List[Any], __UpperCAmelCase : str, __UpperCAmelCase : Union[str, Any] = None ) -> Tuple[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self._tokenizer.model.save(lowerCamelCase_, name=lowerCamelCase_ )
return tuple(lowerCamelCase_ )
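# --- Illustrative usage (sketch, not part of the original file): the fast
# tokenizer is normally loaded from the Hub using one of the checkpoints listed
# in PRETRAINED_VOCAB_FILES_MAP.
# tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
# print(tokenizer("Hello world")["input_ids"])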
| 507
|
"""
Project Euler Problem 25: https://projecteuler.net/problem=25

Find the index of the first term in the Fibonacci sequence to contain n digits.
"""


def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
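# --- Illustrative check (sketch, not part of the original file): the first
# Fibonacci term with three digits is F(12) = 144, so fibonacci_digits_index(3)
# returns 12.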
| 90
| 0
|
"""simple docstring"""
from itertools import product
def snake_case ( _a: Tuple , _a: Any )-> list[int]:
'''simple docstring'''
lowerCamelCase__ = sides_number
lowerCamelCase__ = max_face_number * dice_number
lowerCamelCase__ = [0] * (max_total + 1)
lowerCamelCase__ = 1
lowerCamelCase__ = range(SCREAMING_SNAKE_CASE_ , max_face_number + 1 )
for dice_numbers in product(SCREAMING_SNAKE_CASE_ , repeat=SCREAMING_SNAKE_CASE_ ):
lowerCamelCase__ = sum(SCREAMING_SNAKE_CASE_ )
totals_frequencies[total] += 1
return totals_frequencies
def snake_case ( )-> float:
'''simple docstring'''
lowerCamelCase__ = total_frequency_distribution(
sides_number=4 , dice_number=9 )
lowerCamelCase__ = total_frequency_distribution(
sides_number=6 , dice_number=6 )
lowerCamelCase__ = 0
lowerCamelCase__ = 9
lowerCamelCase__ = 4 * 9
lowerCamelCase__ = 6
for peter_total in range(SCREAMING_SNAKE_CASE_ , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
lowerCamelCase__ = (4**9) * (6**6)
lowerCamelCase__ = peter_wins_count / total_games_number
lowerCamelCase__ = round(SCREAMING_SNAKE_CASE_ , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 703
|
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Return a nontrivial factor of ``num``, or None if no factor was found."""
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        # A pseudorandom value in [0, modulus) derived from ``value``.
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
| 659
| 0
|
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"


class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
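# --- Illustrative usage (hedged sketch, not part of the original file): the
# constructor downloads the v1.1-v1.3 checkpoints, so this is heavy; the
# component values are placeholders, normally taken from a loaded v1.4 pipeline.
# pipe = StableDiffusionComparisonPipeline(
#     vae=..., text_encoder=..., tokenizer=..., unet=..., scheduler=...,
#     safety_checker=..., feature_extractor=...,
# )
# images = pipe("a photo of an astronaut riding a horse").images  # one image per checkpoint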
| 23
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(self, vocab_size=50277, context_length=1024, hidden_size=4096, num_hidden_layers=32, attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0, eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
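# --- Illustrative usage (sketch, not part of the original file):
# config = RwkvConfig(context_length=2048)
# print(config.max_position_embeddings)  # 2048, resolved through attribute_map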
| 32
| 0
|
"""simple docstring"""
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 705
|
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
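# --- Illustrative note (sketch, not part of the original script): float16
# weights are intended for GPU inference; on a CPU-only machine, one would load
# the pipeline in full precision instead, e.g.
# pipe = StableDiffusionPipeline.from_pretrained(model_id).to("cpu")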
| 210
| 0
|
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # Take items in decreasing key order while they still fit in the budget.
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
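# --- Illustrative usage (sketch, not part of the original file): pick items
# greedily by value under a weight budget of 15.
# foods = build_menu(["burger", "salad", "pizza"], [80, 30, 100], [10, 3, 12])
# chosen, total_value = greedy(foods, 15, Things.get_value)
# print(chosen, total_value)  # -> [pizza, salad] with total value 130.0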
| 100
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=50400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
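# --- Illustrative usage (sketch, not part of the original file): the ONNX
# export metadata can be inspected without instantiating a model.
# config = GPTJConfig()
# onnx_config = GPTJOnnxConfig(config)
# print(onnx_config.inputs)              # input_ids / attention_mask dynamic axes
# print(onnx_config.default_onnx_opset)  # 13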
| 430
| 0
|
"""simple docstring"""
def A( snake_case_ ):
"""simple docstring"""
lowercase__ , lowercase__: str = [], []
while len(snake_case_ ) > 1:
lowercase__ , lowercase__: Optional[int] = min(snake_case_ ), max(snake_case_ )
start.append(snake_case_ )
end.append(snake_case_ )
collection.remove(snake_case_ )
collection.remove(snake_case_ )
end.reverse()
return start + collection + end
if __name__ == "__main__":
UpperCamelCase = input("""Enter numbers separated by a comma:\n""").strip()
UpperCamelCase = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
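# --- Illustrative check (sketch, not part of the original file):
# merge_sort([5, 3, 1, 2]) returns [1, 2, 3, 5]; note that the function
# consumes its input list via collection.remove().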
| 120
|
"""simple docstring"""
from math import factorial
def solution(n: int = 20) -> int:
    number = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = number // 2

    return int(factorial(number) / (factorial(k) * factorial(number - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
UpperCamelCase = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
| 120
| 1
|
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune helper directories and hidden/underscore-prefixed directories.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md('''.''')
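# --- Illustrative output (sketch, not part of the original file): for a tree
# containing "sorts/merge_sort.py", print_directory_md(".") emits roughly:
#
# ## Sorts
#   * [Merge Sort](sorts/merge_sort.py)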
| 91
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
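# --- Illustrative invocation (sketch, not part of the original script; the
# file name below is an assumption):
# $ python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./table-transformer-detection
# This uses the default detection checkpoint URL and writes the converted
# model and image processor to the given folder.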
| 337
| 0
|
import unittest

from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
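# --- Illustrative invocation (sketch, not part of the original file): a single
# block's regression test can be selected by class name; the test-file path is
# an assumption about the repository layout.
# $ python -m pytest tests/test_unet_blocks.py -k "CrossAttnDownBlock2DTests"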
| 206
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.")

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
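# --- Illustrative usage (sketch, not part of the original module): time how
# long writing a tiny string dataset takes. The file path is arbitrary.
# features = datasets.Features({"text": datasets.Value("string")})
# seconds = get_duration(generate_example_dataset)("/tmp/dummy.arrow", features, num_examples=10)
# print(f"wrote 10 examples in {seconds:.4f}s")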
| 206
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __snake_case ( a__ , a__ , a__ , unittest.TestCase):
_lowerCAmelCase = StableUnCLIPImgaImgPipeline
_lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
_lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
_lowerCAmelCase = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_lowerCAmelCase = frozenset([])
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : Optional[int] = 32
lowerCamelCase : Union[str, Any] = embedder_hidden_size
# image encoding components
lowerCamelCase : int = CLIPImageProcessor(crop_size=32, size=32 )
torch.manual_seed(0 )
lowerCamelCase : List[str] = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=A, projection_dim=A, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )
# regular denoising components
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = StableUnCLIPImageNormalizer(embedding_dim=A )
lowerCamelCase : Dict = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
lowerCamelCase : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
lowerCamelCase : Any = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=A, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
torch.manual_seed(0 )
lowerCamelCase : Tuple = UNetaDConditionModel(
sample_size=32, in_channels=4, out_channels=4, down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D'), up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D'), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type='projection', projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=A, layers_per_block=1, upcast_attention=A, use_linear_projection=A, )
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = DDIMScheduler(
beta_schedule='scaled_linear', beta_start=0.0_0085, beta_end=0.012, prediction_type='v_prediction', set_alpha_to_one=A, steps_offset=1, )
torch.manual_seed(0 )
lowerCamelCase : Union[str, Any] = AutoencoderKL()
lowerCamelCase : int = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
        )

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(input_image, "anime turtle", num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 320
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig):
    """Configuration class for CamemBERT models (a RoBERTa-style architecture)."""

    model_type = "camembert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
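# Usage sketch (illustrative, not part of the original file): the dynamic axes
# above let an exported ONNX graph accept arbitrary batch and sequence sizes.
#
#   config = CamembertConfig()
#   onnx_config = CamembertOnnxConfig(config, task="sequence-classification")
#   print(onnx_config.inputs)  # OrderedDict of input names -> dynamic axes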
| 320
| 1
|
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        # SentencePiece uses "\u2581" to mark word boundaries; map it back to a space
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
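# The helper works for any prompt containing exactly one <mask> token, e.g.
# (illustrative; the actual completions depend on the model weights):
#
#   fill_mask("Paris est la <mask> de la France.", model, tokenizer, topk=3)
#
# Each returned tuple is (filled sentence, probability, predicted token).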
| 607
|
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 607
| 1
|
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """Configuration class for ESM protein language models and ESMFold."""

    model_type = "esm"

    def __init__(self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1026, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type="absolute", use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False, esmfold_config=None, vocab_list=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")
    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # note: the source compared each state dim against itself here; the intended
        # check is divisibility by the corresponding head width
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    """Return the default ESM-2 vocabulary as a tuple of token strings."""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
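# Minimal usage sketch (illustrative values, not from the original file): a
# folding config nests EsmFoldConfig -> TrunkConfig -> StructureModuleConfig,
# and to_dict() serializes the whole tree:
#
#   config = EsmConfig(vocab_size=33, is_folding_model=True)
#   assert "esmfold_config" in config.to_dict()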
| 7
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow

if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    # tf.int64 assumed; the dtype's digits were obscured in the source
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123
            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 196
| 0
|
def apply_table(inp, table):
    """Permute the bits of `inp` according to the 1-indexed positions in `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Rotate a bit-string left by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit value in S-box `s`: outer bits select the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = input("""Enter 10 bit key: """)
_SCREAMING_SNAKE_CASE = input("""Enter 8 bit message: """)
_SCREAMING_SNAKE_CASE = [6, 3, 7, 4, 8, 5, 1_0, 9]
_SCREAMING_SNAKE_CASE = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
_SCREAMING_SNAKE_CASE = [2, 4, 3, 1]
_SCREAMING_SNAKE_CASE = [2, 6, 3, 1, 4, 8, 5, 7]
_SCREAMING_SNAKE_CASE = [4, 1, 3, 5, 7, 2, 8, 6]
_SCREAMING_SNAKE_CASE = [4, 1, 2, 3, 2, 3, 4, 1]
_SCREAMING_SNAKE_CASE = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_SCREAMING_SNAKE_CASE = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_SCREAMING_SNAKE_CASE = apply_table(key, paa_table)
_SCREAMING_SNAKE_CASE = temp[:5]
_SCREAMING_SNAKE_CASE = temp[5:]
_SCREAMING_SNAKE_CASE = left_shift(left)
_SCREAMING_SNAKE_CASE = left_shift(right)
_SCREAMING_SNAKE_CASE = apply_table(left + right, pa_table)
_SCREAMING_SNAKE_CASE = left_shift(left)
_SCREAMING_SNAKE_CASE = left_shift(right)
_SCREAMING_SNAKE_CASE = left_shift(left)
_SCREAMING_SNAKE_CASE = left_shift(right)
_SCREAMING_SNAKE_CASE = apply_table(left + right, pa_table)
# encryption
_SCREAMING_SNAKE_CASE = apply_table(message, IP)
_SCREAMING_SNAKE_CASE = function(expansion, sa, sa, keya, temp)
_SCREAMING_SNAKE_CASE = temp[4:] + temp[:4]
_SCREAMING_SNAKE_CASE = function(expansion, sa, sa, keya, temp)
_SCREAMING_SNAKE_CASE = apply_table(temp, IP_inv)
print("""Cipher text is:""", CT)
# decryption
_SCREAMING_SNAKE_CASE = apply_table(CT, IP)
_SCREAMING_SNAKE_CASE = function(expansion, sa, sa, keya, temp)
_SCREAMING_SNAKE_CASE = temp[4:] + temp[:4]
_SCREAMING_SNAKE_CASE = function(expansion, sa, sa, keya, temp)
_SCREAMING_SNAKE_CASE = apply_table(temp, IP_inv)
print("""Plain text after decypting is:""", PT)
| 714
|
"""simple docstring"""
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Return the least row length n for which the fill-count function first exceeds one million."""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
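# Context (added note): this is Project Euler problem 115, "Counting block
# combinations II". fill_count_functions[n] counts the ways to fill a row of
# n units with separated blocks of length >= min_block_length, and solution()
# returns the least n for which that count exceeds one million.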
if __name__ == "__main__":
print(f'{solution() = }')
| 239
| 0
|
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
| 335
|
def is_automorphic_number(number: int) -> bool:
    """Return True if number's square ends in the digits of number itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
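    # A couple of hand-checked examples (added for illustration):
    # 76**2 = 5776 ends in 76, while 7**2 = 49 does not end in 7.
    assert is_automorphic_number(76)
    assert not is_automorphic_number(7)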
| 32
| 0
|
"""simple docstring"""
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """Compute the fixed monthly payment on an amortizing loan."""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
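    # Worked example (illustrative numbers): 25,000 borrowed at 12% per annum
    # over 2 years gives rate_per_month = 0.01 and 24 payments, so the EMI is
    # 25000 * 0.01 * 1.01**24 / (1.01**24 - 1), roughly 1176.84 per month.
    print(equated_monthly_installments(25000, 0.12, 2))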
| 702
|
"""simple docstring"""
import os
def solution():
    """Find the greatest product of four adjacent numbers (in any direction) in the 20x20 grid."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left)
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 310
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 86
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """Object detection pipeline predicting bounding boxes and their classes for an image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation
def __A ( self : Tuple , UpperCAmelCase : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
A_ , A_ , A_ , A_ = box.int().tolist()
A_ = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
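# Typical usage through the high-level factory (illustrative; requires the
# `vision` extra and a detection checkpoint such as facebook/detr-resnet-50):
#
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)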
| 86
| 1
|
'''simple docstring'''
def mean_absolute_deviation(nums: list) -> float:
    """Return the average absolute distance of the values in nums from their mean."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
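    # Worked example (added for illustration): for [1, 2, 3, 4] the mean is 2.5
    # and the absolute deviations are 1.5, 0.5, 0.5, 1.5, so the result is 1.0.
    print(mean_absolute_deviation([1, 2, 3, 4]))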
| 707
|
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    """Return True if the non-negative integer num reads the same forwards and backwards."""
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
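    # Hand-checked examples (added for illustration):
    assert is_palindrome(121)
    assert not is_palindrome(123)
    assert not is_palindrome(-121)  # negatives are never palindromes here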
| 13
| 0
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    # target key assumed; it was obscured in the source
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
lowerCamelCase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
lowerCamelCase : str =parser.parse_args()
lowerCamelCase , lowerCamelCase : Dict =shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
lowerCamelCase : Optional[Any] =NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
lowerCamelCase : Optional[Any] =NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path)
| 228
|
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb a staircase taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
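    # Hand-checked examples (added for illustration): f(3) = 3 because the
    # options are 1+1+1, 1+2 and 2+1; f(4) = 5 follows the Fibonacci recurrence.
    assert climb_stairs(3) == 3
    assert climb_stairs(4) == 5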
| 228
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, patch_size=4, num_channels=3, embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16], kernel_size=7, mlp_ratio=3.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-5, layer_scale_init_value=0.0, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 706
|
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
        self.run_and_check(train_args)

        eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
        self.run_and_check(entropy_eval_args)
| 113
| 0
|
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization."
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument("--device", type=str, default="cpu", help="Device where the model will be run")
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        # config defaults that interfere with scripted beam search; the exact
        # field names are assumed, as they were obscured in the source
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
| 208
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(train_file: str, eval_file: str, test_file: str, tokenizer: PreTrainedTokenizer, label_column_id: int, max_seq_length: Optional[int] = None):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
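# Illustrative call (file names and the checkpoint are placeholders; AutoTokenizer
# is imported above):
#
#   train_ds, val_ds, test_ds, label2id = get_tfds(
#       train_file="train.csv",
#       eval_file="dev.csv",
#       test_file="test.csv",
#       tokenizer=AutoTokenizer.from_pretrained("bert-base-uncased"),
#       label_column_id=0,
#       max_seq_length=128,
#   )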
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")

            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
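# Illustrative invocation, assuming this file is saved as run_tf_text_classification.py
# (file names and the checkpoint are placeholders; the flags come from the dataclasses
# above and from TFTrainingArguments):
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --label_column_id 0 \
#       --train_file train.csv \
#       --dev_file dev.csv \
#       --output_dir ./model \
#       --do_train \
#       --do_eval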
if __name__ == "__main__":
main()
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ImageProcessor(BaseImageProcessor):
    # The concrete model this processor belongs to is not identified in this file;
    # the class name is a generic placeholder.
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PIL.Image.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Resize an image to `(size["height"], size["width"])` with the given resampling filter."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Center-crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Rescale pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """Normalize an image with the given per-channel mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs):
        # Fall back to the instance defaults for any argument not passed explicitly.
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
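# Minimal usage sketch (illustrative; the image path is a placeholder). BaseImageProcessor
# routes __call__ to preprocess, so the processor can be applied directly:
#
#   processor = ImageProcessor()
#   batch = processor(images=PIL.Image.open("example.jpg"), return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224) with the default 256 -> 224 resize/crop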
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
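# With the lazy module installed in sys.modules, the processor class is only resolved
# on first attribute access, e.g. (the package path is illustrative and assumes this
# file is the wav2vec2_with_lm subpackage __init__):
#
#   from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM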