| code (string, 82 – 53.2k chars) | code_codestyle (int64, 0 – 721) | style_context (string, 91 – 41.9k chars) | style_context_codestyle (int64, 0 – 699) | label (int64, 0 – 1) |
|---|---|---|---|---|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''ibert'''
    def __init__(self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , quant_mode=False , force_dequant="none" , **kwargs ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    '''simple docstring'''
@property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 15 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class A ( unittest.TestCase ):
'''simple docstring'''
A__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
lowercase__ = hf_hub_download(
repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
lowercase__ = VideoClassificationPipeline(model=_UpperCAmelCase , image_processor=_UpperCAmelCase , top_k=2 )
lowercase__ = [
example_video_filepath,
"""https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
]
return video_classifier, examples
def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple ) -> List[Any]:
"""simple docstring"""
for example in examples:
lowercase__ = video_classifier(_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
{"""score""": ANY(_UpperCAmelCase ), """label""": ANY(_UpperCAmelCase )},
{"""score""": ANY(_UpperCAmelCase ), """label""": ANY(_UpperCAmelCase )},
] , )
@require_torch
def lowerCamelCase__ (self : str ) -> List[Any]:
"""simple docstring"""
lowercase__ = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
lowercase__ = VideoMAEFeatureExtractor(
size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} )
lowercase__ = pipeline(
"""video-classification""" , model=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , frame_sampling_rate=4 )
        video_file_path = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" )
        lowercase__ = video_classifier(video_file_path , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4 ) , [{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}] , )
lowercase__ = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4 ) , [
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
[{"""score""": 0.5_199, """label""": """LABEL_0"""}, {"""score""": 0.4_801, """label""": """LABEL_1"""}],
] , )
@require_tf
def lowerCamelCase__ (self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
| 15 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float , p: int ) -> float:
    '''simple docstring'''
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
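# ucal(u, p) is the falling-factorial product u * (u - 1) * ... * (u - p + 1) that
# multiplies the p-th forward difference in Newton's formula; the p! divisor is
# applied by the caller via math.factorial.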
def main() -> None:
    '''simple docstring'''
    n = int(input('enter the numbers of values: ' ) )
    y: list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
            y[i][j] = 0
    print('enter the values of parameters in a list: ' )
    x = list(map(int , input().split() ) )
    print('enter the values of corresponding parameters: ' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('enter the value to interpolate: ' ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
    print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
    main()
| 179 | '''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int] , burst_time: list[int] , no_of_processes: int ) -> list[int]:
    '''simple docstring'''
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i )
        if len(ready_process ) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time: list[int] , no_of_processes: int , waiting_time: list[int] ) -> list[int]:
    '''simple docstring'''
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
_lowerCAmelCase :Optional[int] = 4
_lowerCAmelCase :Optional[int] = [2, 5, 3, 7]
_lowerCAmelCase :List[str] = [0, 0, 0, 0]
_lowerCAmelCase :Union[str, Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_lowerCAmelCase :Any = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
f"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
f"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(f"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(f"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 179 | 1 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3
def primitive_root(p_val: int ) -> int:
    """simple docstring"""
    print('''Generating primitive root of p''' )
    while True:
        g = random.randrange(3 , p_val )
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
        return g
def generate_key(key_size: int ):
    """simple docstring"""
    print('''Generating prime p...''' )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
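# Key layout: the public key carries (key_size, generator e_1, e_2 = the modular
# inverse of e_1**d mod p, and the prime p); the private key keeps only (key_size, d).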
def make_key_files(name: str , key_size: int ):
    """simple docstring"""
    if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
        print('''\nWARNING:''' )
        print(
            f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            '''Use a different name or delete these files and re-run this program.''' )
        sys.exit()
    public_key , private_key = generate_key(key_size )
    print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
    with open(f"""{name}_pubkey.txt""" , '''w''' ) as fo:
        fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
    print(f"""Writing private key to file {name}_privkey.txt...""" )
    with open(f"""{name}_privkey.txt""" , '''w''' ) as fo:
        fo.write(f"""{private_key[0]},{private_key[1]}""" )
def main():
"""simple docstring"""
print('''Making key files...''' )
make_key_files('''elgamal''' , 2_0_4_8 )
print('''Key files generation successful''' )
if __name__ == "__main__":
    main()
| 262 | '''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__lowerCAmelCase : List[str] = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """simple docstring"""
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
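# The table above pairs every byte value with a printable unicode character, so the
# byte-level BPE below can round-trip arbitrary bytes through text reversibly.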
def get_pairs(word ):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
return pairs
class A ( PreTrainedTokenizer ):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ['''input_ids''', '''attention_mask''']
def __init__( self : str , __a : Union[str, Any] , __a : Optional[Any] , __a : List[Any]="replace" , __a : Union[str, Any]="<s>" , __a : Any="</s>" , __a : Dict="</s>" , __a : Dict="<s>" , __a : Tuple="<unk>" , __a : List[str]="<pad>" , __a : Any="<mask>" , __a : Dict=False , **__a : Union[str, Any] , ) -> Optional[int]:
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else bos_token
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else sep_token
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else cls_token
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__UpperCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
super().__init__(
errors=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , add_prefix_space=__a , **__a , )
with open(__a , encoding='''utf-8''' ) as vocab_handle:
__UpperCAmelCase = json.load(__a )
__UpperCAmelCase = {v: k for k, v in self.encoder.items()}
__UpperCAmelCase = errors # how to handle errors in decoding
__UpperCAmelCase = bytes_to_unicode()
__UpperCAmelCase = {v: k for k, v in self.byte_encoder.items()}
with open(__a , encoding='''utf-8''' ) as merges_handle:
__UpperCAmelCase = merges_handle.read().split('''\n''' )[1:-1]
__UpperCAmelCase = [tuple(merge.split() ) for merge in bpe_merges]
__UpperCAmelCase = dict(zip(__a , range(len(__a ) ) ) )
__UpperCAmelCase = {}
__UpperCAmelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__UpperCAmelCase = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def snake_case__ ( self : List[Any] ) -> Union[str, Any]:
return len(self.encoder )
def snake_case__ ( self : str ) -> int:
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case__ ( self : List[Any] , __a : Tuple ) -> List[Any]:
if token in self.cache:
return self.cache[token]
__UpperCAmelCase = tuple(__a )
__UpperCAmelCase = get_pairs(__a )
if not pairs:
return token
while True:
__UpperCAmelCase = min(__a , key=lambda __a : self.bpe_ranks.get(__a , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCAmelCase , __UpperCAmelCase = bigram
__UpperCAmelCase = []
__UpperCAmelCase = 0
while i < len(__a ):
try:
__UpperCAmelCase = word.index(__a , __a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCAmelCase = j
if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCAmelCase = tuple(__a )
__UpperCAmelCase = new_word
if len(__a ) == 1:
break
else:
__UpperCAmelCase = get_pairs(__a )
__UpperCAmelCase = ''' '''.join(__a )
__UpperCAmelCase = word
return word
def snake_case__ ( self : int , __a : int ) -> List[Any]:
__UpperCAmelCase = []
for token in re.findall(self.pat , __a ):
__UpperCAmelCase = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__a ).split(''' ''' ) )
return bpe_tokens
def snake_case__ ( self : Optional[Any] , __a : Tuple ) -> str:
return self.encoder.get(__a , self.encoder.get(self.unk_token ) )
def snake_case__ ( self : Optional[int] , __a : Any ) -> List[str]:
return self.decoder.get(__a )
def snake_case__ ( self : Union[str, Any] , __a : List[str] ) -> List[Any]:
__UpperCAmelCase = ''''''.join(__a )
__UpperCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def snake_case__ ( self : Union[str, Any] , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__UpperCAmelCase = os.path.join(
__a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCAmelCase = os.path.join(
__a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__a , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__a , ensure_ascii=__a ) + '''\n''' )
__UpperCAmelCase = 0
with open(__a , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
__UpperCAmelCase = token_index
writer.write(''' '''.join(__a ) + '''\n''' )
index += 1
return vocab_file, merge_file
def snake_case__ ( self : Union[str, Any] , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is None:
return [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1]
def snake_case__ ( self : Dict , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case__ ( self : int , __a : Optional[int] , __a : int=False , **__a : Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__a ) > 0 and not text[0].isspace()):
__UpperCAmelCase = ''' ''' + text
return (text, kwargs)
def snake_case__ ( self : List[str] , __a : List[int] , __a : Optional[List[int]] = None ) -> Dict:
return token_ids_a + [self.eos_token_id]
def snake_case__ ( self : Optional[Any] , __a : "Conversation" ) -> List[int]:
__UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(__a )
__UpperCAmelCase = ''' '''.join(__a )
__UpperCAmelCase = self.encode(__a )
if len(__a ) > self.model_max_length:
__UpperCAmelCase = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
| 262 | 1 |
"""simple docstring"""
def solution(n: int = 1_00 ) -> int:
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2 , n ):
        for b in range(2 , n ):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow )  # adds the result to the set
    return len(collect_powers )
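# e.g. solution(5) == 15: for 2 <= a, b <= 5 the terms a**b collapse to 15 distinct
# values, matching the worked example from Project Euler problem 29.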
if __name__ == "__main__":
    print('Number of terms ', solution(int(str(input()).strip())))
| 713 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=DummyObject ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = ["flax", "transformers"]
def __init__( self : Union[str, Any] ,*_snake_case : str ,**_snake_case : List[str] ) -> Any:
"""simple docstring"""
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase ( cls : Any ,*_snake_case : List[Any] ,**_snake_case : Dict ) -> Any:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase ( cls : List[str] ,*_snake_case : List[Any] ,**_snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
class __A ( metaclass=DummyObject ):
'''simple docstring'''
lowerCAmelCase : str = ["flax", "transformers"]
def __init__( self : str ,*_snake_case : Union[str, Any] ,**_snake_case : Dict ) -> List[Any]:
"""simple docstring"""
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase ( cls : List[str] ,*_snake_case : Any ,**_snake_case : str ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase ( cls : Optional[int] ,*_snake_case : List[Any] ,**_snake_case : int ) -> List[Any]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
class __A ( metaclass=DummyObject ):
'''simple docstring'''
lowerCAmelCase : Dict = ["flax", "transformers"]
def __init__( self : Any ,*_snake_case : str ,**_snake_case : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase ( cls : List[Any] ,*_snake_case : int ,**_snake_case : Tuple ) -> List[str]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] ,*_snake_case : Optional[Any] ,**_snake_case : Tuple ) -> int:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
class __A ( metaclass=DummyObject ):
'''simple docstring'''
lowerCAmelCase : List[str] = ["flax", "transformers"]
def __init__( self : Optional[Any] ,*_snake_case : List[str] ,**_snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase ( cls : Optional[Any] ,*_snake_case : int ,**_snake_case : Optional[int] ) -> List[str]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
@classmethod
def UpperCAmelCase ( cls : Tuple ,*_snake_case : Any ,**_snake_case : Dict ) -> List[str]:
"""simple docstring"""
requires_backends(cls ,['''flax''', '''transformers'''] )
| 122 | 0 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def lowercase__ ( __UpperCamelCase : List[str] ):
'''simple docstring'''
if isinstance(__UpperCamelCase , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
pass
def __magic_name__ ( self ):
"""simple docstring"""
pass
def __magic_name__ ( self ):
"""simple docstring"""
pass
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ):
"""simple docstring"""
__lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(__UpperCAmelCase )
__lowercase = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ):
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=__UpperCAmelCase , text_model=__UpperCAmelCase )
__lowercase = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ):
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCAmelCase )
__lowercase = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ):
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=__UpperCAmelCase , text_model=__UpperCAmelCase )
__lowercase = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase )
__lowercase = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(__UpperCAmelCase )
__lowercase = model(input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase )
__lowercase = after_output[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCAmelCase , 1E-5 )
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ):
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=__UpperCAmelCase , text_model=__UpperCAmelCase )
__lowercase = model(
input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , output_attentions=__UpperCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(__UpperCAmelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(__UpperCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = np.abs((a - b) ).max()
self.assertLessEqual(__UpperCAmelCase , __UpperCAmelCase , F'''Difference between torch and flax is {diff} (>= {tol}).''' )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**__UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_save_load(**__UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__UpperCAmelCase )
@slow
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase , __lowercase = self.get_pretrained_model_and_inputs()
__lowercase = model_a(**__UpperCAmelCase )
__lowercase = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__UpperCAmelCase )
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(__UpperCAmelCase )
__lowercase = model_a(**__UpperCAmelCase )
__lowercase = after_outputs[0].numpy()
__lowercase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCAmelCase , 1E-5 )
@require_tf
class lowerCamelCase__( TFVisionTextDualEncoderMixin , unittest.TestCase ):
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" )
__lowercase = 1_3
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = TFViTModel(__UpperCAmelCase , name="""vision_model""" )
__lowercase = TFBertModel(__UpperCAmelCase , name="""text_model""" )
return vision_model, text_model
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = TFViTModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class lowerCamelCase__( TFVisionTextDualEncoderMixin , unittest.TestCase ):
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" )
__lowercase = 1_3
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ):
"""simple docstring"""
__lowercase , __lowercase = self.get_vision_text_model(__UpperCAmelCase , __UpperCAmelCase )
__lowercase = TFVisionTextDualEncoderModel(vision_model=__UpperCAmelCase , text_model=__UpperCAmelCase )
__lowercase = model(
input_ids=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , output_attentions=__UpperCAmelCase )
__lowercase = output.vision_model_output.attentions
self.assertEqual(len(__UpperCAmelCase ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowercase = to_atuple(vision_model.config.image_size )
__lowercase = to_atuple(vision_model.config.patch_size )
__lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase = output.text_model_output.attentions
self.assertEqual(len(__UpperCAmelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = TFDeiTModel(__UpperCAmelCase , name="""vision_model""" )
__lowercase = TFRobertaModel(__UpperCAmelCase , name="""text_model""" )
return vision_model, text_model
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = TFDeiTModelTester(self )
__lowercase = TFRobertaModelTester(self )
__lowercase = vit_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class lowerCamelCase__( TFVisionTextDualEncoderMixin , unittest.TestCase ):
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" )
__lowercase = 1_3
__lowercase = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__lowercase = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__lowercase = random_attention_mask([batch_size, 4] )
__lowercase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def __magic_name__ ( self , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
__lowercase = TFCLIPVisionModel(__UpperCAmelCase , name="""vision_model""" )
__lowercase = TFBertModel(__UpperCAmelCase , name="""text_model""" )
return vision_model, text_model
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = TFCLIPVisionModelTester(self )
__lowercase = TFBertModelTester(self )
__lowercase = clip_model_tester.prepare_config_and_inputs()
__lowercase = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase = vision_config_and_inputs
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class lowerCamelCase__( unittest.TestCase ):
@slow
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=__UpperCAmelCase )
__lowercase = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
__lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowercase = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="""np""" )
__lowercase = model(**__UpperCAmelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowercase = np.array([[1.2_28_47_27, 0.3_10_41_22]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , __UpperCAmelCase , atol=1E-3 ) )
| 566 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.array ) -> np.array:
    '''simple docstring'''
    return 1 / (1 + np.exp(-vector ))
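# e.g. sigmoid(np.array([-1.0, 0.0, 1.0])) ~= array([0.26894, 0.5, 0.73106])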
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 566 | 1 |
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input():
    '''simple docstring'''
    compute_environment = _ask_options(
        'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None ):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser('config' , description=description )
    else:
        parser = argparse.ArgumentParser('Accelerate config command' , description=description )
parser.add_argument(
        '--config_file' , default=None , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
        parser.set_defaults(func=config_command )
return parser
def config_command(args ):
    '''simple docstring'''
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith('.json' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(f"""accelerate configuration saved at {config_file}""" )
def main():
    '''simple docstring'''
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
    main()
| 700 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
class A( BaseImageProcessor ):
'''simple docstring'''
UpperCamelCase = ['''pixel_values''']
def __init__( self : List[Any] , A_ : bool = True , A_ : Dict[str, int] = None , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : bool = True , A_ : Dict[str, int] = None , A_ : bool = True , A_ : Union[int, float] = 1 / 255 , A_ : bool = True , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , A_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **A_ : Tuple , ) -> None:
"""simple docstring"""
super().__init__(**A_ )
lowerCamelCase_ = size if size is not None else {'shortest_edge': 224}
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
lowerCamelCase_ = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' )
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = resample
lowerCamelCase_ = do_center_crop
lowerCamelCase_ = crop_size
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCamelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PILImageResampling.BICUBIC , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Tuple , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
lowerCamelCase_ = int((256 / 224) * size['shortest_edge'] )
lowerCamelCase_ = get_resize_output_image_size(A_ , size=A_ , default_to_square=A_ )
lowerCamelCase_ = {'height': output_size[0], 'width': output_size[1]}
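            # This mirrors the classic resize-then-center-crop recipe: the short side is
            # scaled to (256/224) * shortest_edge so the later crop keeps the usual margin.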
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
A_ , size=(size_dict['height'], size_dict['width']) , resample=A_ , data_format=A_ , **A_ )
def a__ ( self : Any , A_ : np.ndarray , A_ : Dict[str, int] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Any , ) -> np.ndarray:
"""simple docstring"""
lowerCamelCase_ = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A_ , size=(size['height'], size['width']) , data_format=A_ , **A_ )
def a__ ( self : Optional[Any] , A_ : np.ndarray , A_ : Union[int, float] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : Optional[int] , ) -> np.ndarray:
"""simple docstring"""
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def a__ ( self : List[str] , A_ : np.ndarray , A_ : Union[float, List[float]] , A_ : Union[float, List[float]] , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : str , ) -> np.ndarray:
"""simple docstring"""
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def a__ ( self : Optional[int] , A_ : ImageInput , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : PILImageResampling = None , A_ : Optional[bool] = None , A_ : Optional[Dict[str, int]] = None , A_ : Optional[bool] = None , A_ : Optional[float] = None , A_ : Optional[bool] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[Union[float, Iterable[float]]] = None , A_ : Optional[TensorType] = None , A_ : ChannelDimension = ChannelDimension.FIRST , **A_ : List[Any] , ) -> BatchFeature:
"""simple docstring"""
lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ = resample if resample is not None else self.resample
lowerCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ = image_std if image_std is not None else self.image_std
lowerCamelCase_ = size if size is not None else self.size
lowerCamelCase_ = get_size_dict(A_ , default_to_square=A_ )
lowerCamelCase_ = crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ = get_size_dict(A_ , param_name='crop_size' )
lowerCamelCase_ = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCamelCase_ = [to_numpy_array(A_ ) for image in images]
if do_resize:
lowerCamelCase_ = [self.resize(A_ , A_ , A_ ) for image in images]
if do_center_crop:
lowerCamelCase_ = [self.center_crop(A_ , A_ ) for image in images]
if do_rescale:
lowerCamelCase_ = [self.rescale(A_ , A_ ) for image in images]
if do_normalize:
lowerCamelCase_ = [self.normalize(A_ , A_ , A_ ) for image in images]
lowerCamelCase_ = [to_channel_dimension_format(A_ , A_ ) for image in images]
lowerCamelCase_ = {'pixel_values': images}
return BatchFeature(data=A_ , tensor_type=A_ )
| 651 | 0 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs :
def a__ ( self , _a , _a , _a ) -> str:
return None
class FuncNonContiguousArgs :
def a__ ( self , _a , _a , _a , _a ) -> Union[str, Any]:
return None
class OnnxExportTestCase ( unittest.TestCase ):
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def a__ ( self ) -> List[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(_a , """tf""" , 12 , **_a )
@require_torch
@slow
def a__ ( self ) -> str:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(_a , """pt""" , 12 , **_a )
@require_torch
@slow
def a__ ( self ) -> Union[str, Any]:
from transformers import BertModel
_A : Any = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(_a ) )
vocab_file.flush()
_A : List[Any] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
_A : Tuple = BertModel(BertConfig(vocab_size=len(_a ) ) )
model.save_pretrained(_a )
self._test_export(_a , """pt""" , 12 , _a )
@require_tf
@slow
def a__ ( self ) -> List[str]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_A : List[str] = self._test_export(_a , """tf""" , 12 , **_a )
_A : int = quantize(Path(_a ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(_a ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def a__ ( self ) -> Tuple:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
_A : List[Any] = self._test_export(_a , """pt""" , 12 , **_a )
_A : Optional[int] = quantize(_a )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(_a ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def a__ ( self , _a , _a , _a , _a=None , **_a ) -> Tuple:
try:
# Compute path
with TemporaryDirectory() as tempdir:
_A : Dict = Path(_a ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(_a , _a , _a , _a , _a , **_a )
return path
except Exception as e:
self.fail(_a )
@require_torch
@require_tokenizers
@slow
def a__ ( self ) -> List[Any]:
from transformers import BertModel
_A : Any = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
_A : Optional[Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(_a , _a , """pt""" )
@require_tf
@require_tokenizers
@slow
def a__ ( self ) -> int:
from transformers import TFBertModel
_A : Tuple = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
_A : Tuple = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(_a , _a , """tf""" )
def a__ ( self , _a , _a , _a ) -> str:
_A : Union[str, Any] = FeatureExtractionPipeline(_a , _a )
_A : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
_A , _A , _A , _A : Dict = infer_shapes(_a , _a )
# Assert all variables are present
self.assertEqual(len(_a ) , len(_a ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , _a )
self.assertSequenceEqual(variable_names[3:] , _a )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def a__ ( self ) -> Tuple:
_A : Any = ["""input_ids""", """attention_mask""", """token_type_ids"""]
_A : Optional[Any] = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
_A , _A : List[Any] = ensure_valid_input(FuncContiguousArgs() , _a , _a )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(_a ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(_a ) , set(_a ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(_a , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
_A , _A : Optional[Any] = ensure_valid_input(FuncNonContiguousArgs() , _a , _a )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(_a ) , 1 )
self.assertEqual(len(_a ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def a__ ( self ) -> Optional[int]:
_A : Any = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
| 307 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester ( unittest.TestCase ):
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ) -> Optional[int]:
_A : List[Any] = parent
_A : List[Any] = batch_size
_A : Dict = seq_length
_A : Optional[Any] = is_training
_A : int = use_attention_mask
_A : int = use_token_type_ids
_A : List[Any] = use_labels
_A : List[str] = vocab_size
_A : List[Any] = hidden_size
_A : str = num_hidden_layers
_A : Optional[Any] = num_attention_heads
_A : List[Any] = intermediate_size
_A : Any = hidden_act
_A : int = hidden_dropout_prob
_A : int = attention_probs_dropout_prob
_A : List[str] = max_position_embeddings
_A : Optional[int] = type_vocab_size
_A : List[str] = type_sequence_label_size
_A : Dict = initializer_range
_A : List[Any] = num_choices
def a__ ( self ) -> int:
_A : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A : Optional[Any] = None
if self.use_attention_mask:
_A : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
_A : Optional[int] = None
if self.use_token_type_ids:
_A : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A : Optional[int] = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def a__ ( self ) -> List[str]:
_A : Tuple = self.prepare_config_and_inputs()
_A , _A , _A , _A : str = config_and_inputs
_A : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def a__ ( self ) -> int:
_A : Any = self.prepare_config_and_inputs()
_A , _A , _A , _A : int = config_and_inputs
_A : int = True
_A : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowercase ( FlaxModelTesterMixin , unittest.TestCase ):
_a = True
_a = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def a__ ( self ) -> List[Any]:
_A : Optional[Any] = FlaxRobertaModelTester(self )
@slow
def a__ ( self ) -> Optional[int]:
for model_class_name in self.all_model_classes:
_A : Optional[int] = model_class_name.from_pretrained("""roberta-base""" , from_pt=_a )
_A : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_a )
| 307 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    """
    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, sample_size)`):
            Hidden states output from the last layer of the model.
    """

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
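# Illustrative smoke test (mine, not part of the module): with the defaults above the
# model maps (batch, channels, length) -> (batch, channels, length).
#
#     model = UNet1DModel()
#     noise = torch.randn(1, 2, 65536)
#     out = model(noise, timestep=10).sample  # torch.Size([1, 2, 65536])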
| 662 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        image_processor_map = {
"do_resize": True,
"size": {"height": 2_24, "width": 2_24},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"do_convert_rgb": True,
}
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 662 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 188 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """
    Return the median of the two arrays taken together.

    >>> median_of_two_arrays([1, 2], [3])
    2
    >>> median_of_two_arrays([1, 2], [3, 4])
    2.5
    """
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 188 | 1 |
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
| 701 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase : Optional[Any] = {
"configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[str] = [
"PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
"PegasusXForConditionalGeneration",
"PegasusXModel",
"PegasusXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 155 | 0 |
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the string n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
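# Note on the reduce above: any 13-digit window containing a '0' multiplies out to 0,
# so only zero-free runs of digits can produce the maximum.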
if __name__ == "__main__":
print(F"""{solution() = }""") | 30 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''t5-small''': 5_12,
'''t5-base''': 5_12,
'''t5-large''': 5_12,
'''t5-3b''': 5_12,
'''t5-11b''': 5_12,
}
SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)
    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
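# Sentinel bookkeeping (see _convert_token_to_id above): sentinel ids count down from the
# top of the vocabulary, so with extra_ids=100, "<extra_id_0>" maps to vocab_size - 1 and
# "<extra_id_99>" maps to vocab_size - 100.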
| 674 | 0 |
import math
def is_prime(number: int) -> bool:
    """Return True if the given non-negative integer is prime."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)
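# For example, is_prime(5) is True (no odd divisor up to sqrt(5)), while is_prime(9)
# is False because 9 % 3 == 0.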
def next_prime(value, factor=1, **kwargs):
    """Return the next prime at or after factor * value (or before it, with desc=True)."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
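# Illustrative: next_prime(14) walks upward to 17, while next_prime(14, desc=True)
# walks downward to 13.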
| 714 |
__all__ = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 202 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A pure-Python vector supporting the basic linear-algebra operations."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)
def __len__( self : Tuple ):
"""simple docstring"""
return len(self.__components )
    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"
    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")
    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert isinstance(x, Vector) and isinstance(y, Vector) and isinstance(scalar, (int, float))
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A simple matrix with explicit width and height."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h
def __str__( self : int ):
"""simple docstring"""
        ans = ""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
else:
raise Exception('''matrix must have the same dimension!''' )
    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
else:
raise Exception('''matrices must have the same dimension!''' )
@overload
def __mul__( self : Any , __lowerCAmelCase : float ):
"""simple docstring"""
...
@overload
def __mul__( self : Tuple , __lowerCAmelCase : Vector ):
"""simple docstring"""
...
    def __mul__(self, other: float | Vector) -> Vector | Matrix | None:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            # Laplace expansion along the first row
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(w: int, h: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(w)] for _ in range(h)
    ]
    return Matrix(matrix, w, h)
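# Quick illustrative check (mine, not part of the original module):
#     v = Vector([1.0, 2.0, 2.0])
#     assert v.euclidean_length() == 3.0      # sqrt(1 + 4 + 4)
#     m = square_zero_matrix(3)
#     assert m.determinant() == 0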
| 83 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 474 | 0 |
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
if audio is not None and text is not None:
raise ValueError(
"""Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?""" )
if audio_target is not None and text_target is not None:
raise ValueError(
"""Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?""" )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
"""You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.""" )
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
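# Routing summary (mine): processor(audio=..., sampling_rate=...) feeds the feature
# extractor, processor(text=...) feeds the tokenizer, and the *_target variants fill in
# "labels" (plus "decoder_attention_mask" when the target side returns an attention mask).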
| 719 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U]):
    """A node of the doubly linked list used by LRUCache."""

    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
def __repr__( self : List[Any] ) -> str:
'''simple docstring'''
return (
f"Node: key: {self.key}, val: {self.val}, "
f"has next: {bool(self.next )}, has prev: {bool(self.prev )}"
)
class DoubleLinkedList(Generic[T, U]):
    """A doubly linked list with sentinel head and rear nodes."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        # wire the two sentinels together so the list starts out empty
        self.head.next, self.rear.prev = self.rear, self.head
    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)
    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    """A least-recently-used cache built on the doubly linked list above."""

    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self : Any ) -> str:
'''simple docstring'''
return (
f"CacheInfo(hits={self.hits}, misses={self.miss}, "
f"capacity={self.capacity}, current size={self.num_keys})"
)
    def __contains__(self, key: T) -> bool:
        return key in self.cache
    def get(self, key: T) -> U | None:
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None
    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)
    @classmethod
    def decorator(
        cls, size: int = 128
    ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
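# Illustrative usage of the decorator above (names match this module):
#
#     @LRUCache.decorator(100)
#     def fib(num: int) -> int:
#         return num if num < 2 else fib(num - 1) + fib(num - 2)
#
#     fib(30)
#     print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)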
if __name__ == "__main__":
import doctest
doctest.testmod()
| 265 | 0 |
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """Return number + 2 if number and number + 2 form a twin-prime pair, else -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 70 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 70 | 1 |
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) iteratively using a single row of Pascal's triangle."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
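# Each pass of the outer loop folds in one more row of Pascal's triangle, so the whole
# computation runs in O(n * r) time with only O(r) extra space.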
print(binomial_coefficient(n=10, r=5))
| 702 |
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
lowerCAmelCase_ = tokenizer.vocab_size
lowerCAmelCase_ = [tokenizer.convert_ids_to_tokens(_a ) for x in range(end - 4 , _a )]
self.assertListEqual(_a , ["__java__", "__python__", "__en_XX__", "<mask>"] )
lowerCAmelCase_ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowerCAmelCase_ = tokenizer(_a ).input_ids
self.assertEqual(
tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a ) , _a , )
    def test_full_multi_tokenizer(self) -> Optional[Any]:
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)
lowerCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(_a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
lowerCAmelCase_ = tokenizer.vocab_size
lowerCAmelCase_ = [tokenizer.convert_ids_to_tokens(_a ) for x in range(end - 7 , _a )]
self.assertListEqual(
_a , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
lowerCAmelCase_ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowerCAmelCase_ = tokenizer(_a ).input_ids
self.assertEqual(
tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a ) , _a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __magic_name__ (unittest.TestCase ):
    checkpoint_name = '''uclanlp/plbart-python-en_XX'''
    src_text = [
        '''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
        '''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
    ]
    tgt_text = [
        '''Returns the maximum value of a b c.''',
        '''Sums the values of a b c.''',
    ]
    expected_src_tokens = [
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
    @classmethod
    def setUpClass(cls) -> None:
        cls.tokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls
def __a ( self ) -> Optional[int]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 50001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 50002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 50003 )
    def test_src_tokens(self) -> Any:
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_decode_en(self) -> int:
        self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
lowerCAmelCase_ = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
lowerCAmelCase_ = self.tokenizer.decode(_a , skip_special_tokens=_a )
lowerCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_a )
self.assertEqual(_a , _a )
self.assertNotIn(self.tokenizer.eos_token , _a )
def __a ( self ) -> str:
lowerCAmelCase_ = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
self.assertIsInstance(src_text[0] , _a )
lowerCAmelCase_ = 10
lowerCAmelCase_ = self.tokenizer(_a , max_length=_a , truncation=_a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , _a )
self.assertEqual(len(_a ) , _a )
def __a ( self ) -> Tuple:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [50004, 50001] )
def __a ( self ) -> str:
lowerCAmelCase_ = tempfile.mkdtemp()
lowerCAmelCase_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_a )
lowerCAmelCase_ = PLBartTokenizer.from_pretrained(_a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _a )
@require_torch
def __a ( self ) -> List[str]:
lowerCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_a , return_tensors="pt" )
lowerCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , _a )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def __a ( self ) -> int:
lowerCAmelCase_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_a , truncation=_a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
lowerCAmelCase_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(_a , _a )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
lowerCAmelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _a )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = self.tokenizer(self.src_text , padding=_a , truncation=_a , max_length=3 , return_tensors="pt" )
lowerCAmelCase_ = self.tokenizer(
text_target=self.tgt_text , padding=_a , truncation=_a , max_length=10 , return_tensors="pt" )
lowerCAmelCase_ = targets["input_ids"]
lowerCAmelCase_ = shift_tokens_right(_a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(_a ) , {
# A, test, EOS, en_XX
"input_ids": [[150, 242, 2, 50003]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 50001,
} , )
| 226 | 0 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    '''simple docstring'''

    root_marker = ''
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode='rb',
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                'requote_redirect_url': False,  # see https://github.com/huggingface/datasets/pull/5459
                'trust_env': True,  # Enable reading proxy env variables.
                **(target_options or {}).pop('client_kwargs', {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split('::')[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex('.')]
            if '.' in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip('/')

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), 'name': self.uncompressed_name}
            self.dir_cache = {f['name']: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    '''simple docstring'''

    protocol = 'bz2'
    compression = 'bz2'
    extension = '.bz2'


class GzipFileSystem(BaseCompressedFileFileSystem):
    '''simple docstring'''

    protocol = 'gzip'
    compression = 'gzip'
    extension = '.gz'


class Lz4FileSystem(BaseCompressedFileFileSystem):
    '''simple docstring'''

    protocol = 'lz4'
    compression = 'lz4'
    extension = '.lz4'


class XzFileSystem(BaseCompressedFileFileSystem):
    '''simple docstring'''

    protocol = 'xz'
    compression = 'xz'
    extension = '.xz'


class ZstdCompressedFileFileSystem(BaseCompressedFileFileSystem):
    '''simple docstring'''

    protocol = 'zstd'
    compression = 'zstd'
    extension = '.zst'

    def __init__(self, fo: str, mode: str = "rb", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, block_size: int = DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
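# Added usage sketch (hedged: it assumes a local "archive.txt.gz" exists and
# that these classes have been registered with fsspec, which this module does
# not do by itself):
#
#     import fsspec
#     with fsspec.open("gzip://archive.txt::archive.txt.gz") as f:
#         print(f.read())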
| 27 |
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 612 | 0 |
def ugly_numbers(n: int) -> int:
    """Returns the nth "ugly number": a positive integer whose only prime
    factors are 2, 3 and 5. The sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...

    >>> ugly_numbers(10)
    12
    """
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(F"{ugly_numbers(200) = }")
| 711 |
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''', [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path):
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            '''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    '''features''',
    [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path):
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute('''SELECT * FROM dataset''')
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path):
    cache_dir = tmp_path / '''cache'''
    output_sqlite_path = os.path.join(cache_dir, '''tmp.sql''')
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path):
    cache_dir = tmp_path / '''cache'''
    output_sqlite_path = os.path.join(cache_dir, '''tmp.sql''')
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path):
    cache_dir = tmp_path / '''cache'''
    output_sqlite_path = os.path.join(cache_dir, '''tmp.sql''')
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=0).write()
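# Added end-to-end sketch (not part of the test suite; treat the exact public
# API as an assumption — recent `datasets` releases expose the same machinery
# as `Dataset.from_sql` / `Dataset.to_sql`):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#     ds.to_sql("dataset", "sqlite:///my.db")
#     round_tripped = Dataset.from_sql("dataset", "sqlite:///my.db")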
| 528 | 0 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """simple docstring"""

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    """simple docstring"""

    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype('''f4''')
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3
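# Added sanity-check sketch: with uniform logits over k classes the one-hot
# cross-entropy above reduces to log(k). Guarded so importing the module stays
# side-effect free.
if __name__ == "__main__":
    _logits = jnp.zeros((1, 3))
    _labels = jnp.array([0])
    _loss = calculate_loss_for_nq(_logits, _labels, _logits, _labels, _logits, _labels)
    assert jnp.allclose(_loss, jnp.log(3.0))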
@dataclass
class Args:
    """simple docstring"""

    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 30_00
    save_steps: int = 1_05_00

    block_size: int = 1_28
    batch_size_per_device: int = 3
    seed: int = 1  # field name reconstructed; only the value survives in the source
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 2_00_00
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_file: str = "data/nq-training.jsonl"
    val_data_file: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    """simple docstring"""

    pad_id: int
    max_length: int = 40_96  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features['''input_ids'''])
        batch = {
            '''input_ids''': jnp.array(input_ids, dtype=jnp.int32),
            '''attention_mask''': jnp.array(attention_mask, dtype=jnp.int32),
            '''start_labels''': jnp.array(features['''start_token'''], dtype=jnp.int32),
            '''end_labels''': jnp.array(features['''end_token'''], dtype=jnp.int32),
            '''pooled_labels''': jnp.array(features['''category'''], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name='''batch''')
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop('''start_labels''')
        end_labels = model_inputs.pop('''end_labels''')
        pooled_labels = model_inputs.pop('''pooled_labels''')

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({'''loss''': loss}, axis_name='''batch''')
    grads = jax.lax.pmean(grads, '''batch''')

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name='''batch''')
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop('''start_labels''')
    end_labels = model_inputs.pop('''end_labels''')
    pooled_labels = model_inputs.pop('''pooled_labels''')

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({'''loss''': loss}, axis_name='''batch''')
    return metrics
class TrainState(train_state.TrainState):
    """simple docstring"""

    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    """simple docstring"""

    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                '''lr''': args.lr,
                '''init_lr''': args.init_lr,
                '''warmup_steps''': args.warmup_steps,
                '''num_train_steps''': num_train_steps,
                '''weight_decay''': args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f'Running EPOCH-{epoch}'):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics['''loss'''])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        '''step''': state_step.item(),
                        '''eval_loss''': eval_loss.item(),
                        '''tr_loss''': tr_loss,
                        '''lr''': lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}', state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc='''Evaluating ... '''):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics['''loss'''])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f'SAVING CHECKPOINT IN {save_dir}', end=''' ... ''')
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, '''opt_state.msgpack'''), '''wb''') as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, '''args.joblib'''))
        joblib.dump(self.data_collator, os.path.join(save_dir, '''data_collator.joblib'''))
        with open(os.path.join(save_dir, '''training_state.json'''), '''w''') as f:
            json.dump({'''step''': state.step.item()}, f)
        print('''DONE''')
def restore_checkpoint(save_dir, state):
    print(F'RESTORING CHECKPOINT FROM {save_dir}', end=''' ... ''')
    with open(os.path.join(save_dir, '''flax_model.msgpack'''), '''rb''') as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, '''opt_state.msgpack'''), '''rb''') as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, '''args.joblib'''))
    data_collator = joblib.load(os.path.join(save_dir, '''data_collator.joblib'''))

    with open(os.path.join(save_dir, '''training_state.json'''), '''r''') as f:
        training_state = json.load(f)
    step = training_state['''step''']

    print('''DONE''')
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # flattened keys are tuples of path components; bias and LayerNorm scale
        # parameters are excluded from weight decay
        mask = {k: (k[-1] != '''bias''' and k[-2:] != ('''LayerNorm''', '''scale''')) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)

    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
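# Added sketch: sampling the joined schedule shows the linear warmup to `lr`
# at `warmup_steps`, then linear decay toward 1e-7. Guarded so it only runs
# when this file is executed directly.
if __name__ == "__main__":
    lr_fn = scheduler_fn(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1_000)
    print([float(lr_fn(step)) for step in (0, 50, 100, 1_000)])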
| 542 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.0_2,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ) -> Optional[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages

    def prepare_config_and_inputs(self) -> Optional[Any]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self) -> Optional[int]:
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self) -> Tuple:
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=5_12,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=2_56,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=2_55,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels) -> List[Any]:
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self) -> Union[str, Any]:
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self) -> Optional[int]:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self) -> Optional[int]:
        return

    def test_forward_signature(self) -> Any:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @unittest.skip(reason='''UperNet does not use inputs_embeds''')
    def test_inputs_embeds(self) -> Optional[Any]:
        pass

    @unittest.skip(reason='''UperNet does not support input and output embeddings''')
    def test_model_common_attributes(self) -> int:
        pass

    @unittest.skip(reason='''UperNet does not have a base model''')
    def test_save_load_fast_init_from_base(self) -> int:
        pass

    @unittest.skip(reason='''UperNet does not have a base model''')
    def test_save_load_fast_init_to_base(self) -> Any:
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
    def test_multi_gpu_data_parallel_forward(self) -> List[Any]:
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_is_small(self) -> Optional[Any]:
        pass
    def test_hidden_states_output(self) -> Union[str, Any]:
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self) -> Tuple:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(),
                        [0.0, 1.0],
                        msg=f'Parameter {name} of model {model_class} seems not properly initialized',
                    )
    @unittest.skip(reason='''UperNet does not have tied weights''')
    def test_tied_model_weights_key_ignore(self) -> Tuple:
        pass

    @slow
    def test_model_from_pretrained(self) -> str:
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id='''hf-internal-testing/fixtures_ade20k''', repo_type='''dataset''', filename='''ADE_val_00000001.jpg'''
    )
    image = Image.open(filepath).convert('''RGB''')
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    def test_inference_swin_backbone(self) -> Dict:
        processor = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''')
        model = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''').to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors='''pt''').to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 5_12, 5_12))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4))

    def test_inference_convnext_backbone(self) -> int:
        processor = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''')
        model = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''').to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors='''pt''').to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 5_12, 5_12))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4))
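# Added usage note (illustrative only): in practice the 512x512 logits checked
# above become a per-pixel segmentation map via an argmax over the label
# dimension, e.g.
#
#     predicted_map = outputs.logits.argmax(dim=1)[0]  # (512, 512) class ids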
| 542 | 1 |
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a : Optional[int] = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    '''simple docstring'''

    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None) ->Dict:
        '''simple docstring'''
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.',
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    '''simple docstring'''

    def __init__(self, config) ->Dict:
        '''simple docstring'''
        super().__init__(config)

        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()

        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0
    def set_regression_threshold(self, threshold) ->Dict:
        '''simple docstring'''
        self.regression_threshold = threshold

    def set_patience(self, patience) ->Union[str, Any]:
        '''simple docstring'''
        self.patience = patience

    def reset_stats(self) ->str:
        '''simple docstring'''
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self) ->Tuple:
        '''simple docstring'''
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
            f""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ) ->List[str]:
        '''simple docstring'''
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("""You have to specify either input_ids or inputs_embeds""")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0
                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
@add_start_docstrings(
    'Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ',
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    '''simple docstring'''

    def __init__(self, config) ->Union[str, Any]:
        '''simple docstring'''
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )
        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ) ->List[str]:
        '''simple docstring'''
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
| 712 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a : str = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = 'gelu'

    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20,
        eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ) ->int:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self) ->Optional[int]:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict) ->Any:
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["""input_ids"""])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="""i4""")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="""i4""")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"""Max diff is {diff}""")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict) ->str:
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["""input_ids"""])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["""decoder_input_ids"""],
            inputs_dict["""decoder_attention_mask"""],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="""i4""")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"""Max diff is {diff}""")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
) -> dict:
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
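# Added sketch of the default mask construction above: pad_token_id positions
# get attention 0, and the decoder mask always keeps the first position.
if __name__ == "__main__":
    class _Cfg:  # hypothetical stand-in for a PegasusConfig
        pad_token_id = 1

    toy = prepare_pegasus_inputs_dict(_Cfg(), np.array([[5, 6, 1]]), np.array([[1, 7, 1]]))
    print(toy["attention_mask"])          # [[1 1 0]]
    print(toy["decoder_attention_mask"])  # [[1 1 0]]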
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self) ->List[Any]:
        '''simple docstring'''
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self) ->Union[str, Any]:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self) ->Union[str, Any]:
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self) ->Optional[Any]:
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self) ->List[Any]:
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("""JIT Enabled"""):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("""JIT Disabled"""):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self) ->Optional[int]:
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["""input_ids"""], inputs_dict["""attention_mask"""])

                prepared_inputs_dict = {
                    """decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
                    """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
                    """encoder_outputs""": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("""JIT Enabled"""):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("""JIT Disabled"""):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
def _snake_case ( self : int ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__UpperCamelCase )
_UpperCAmelCase = np.ones((1, 1) )
_UpperCAmelCase = model(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@slow
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
_UpperCAmelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="""np""" , truncation=__UpperCamelCase , max_length=5_12 , padding=__UpperCamelCase )
_UpperCAmelCase = model.generate(**__UpperCamelCase , num_beams=2 ).sequences
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
assert tgt_text == decoded | 19 | 0 |
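For clarity, here is a minimal, self-contained sketch of what the decoder-mask construction above produces on concrete inputs (the ids and pad token are illustrative):

import numpy as np

decoder_input_ids = np.array([[2, 15, 7, 0, 0]])  # illustrative ids, pad_token_id assumed to be 0
decoder_attention_mask = np.concatenate(
    [
        np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),  # decoder start position is always attended
        np.not_equal(decoder_input_ids[:, 1:], 0).astype(np.int8),
    ],
    axis=-1,
)
print(decoder_attention_mask)  # [[1 1 1 0 0]]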
"""simple docstring"""
from string import ascii_uppercase
__lowerCamelCase = {char: i for i, char in enumerate(ascii_uppercase)}
__lowerCamelCase = dict(enumerate(ascii_uppercase))
def a ( __snake_case : str, __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ :int = len(__snake_case )
UpperCAmelCase_ :Tuple = 0
while True:
if x == i:
UpperCAmelCase_ :Dict = 0
if len(__snake_case ) == len(__snake_case ):
break
key += key[i]
i += 1
return key
def a ( __snake_case : str, __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ :Union[str, Any] = ''''''
UpperCAmelCase_ :List[str] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
UpperCAmelCase_ :Tuple = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def a ( __snake_case : str, __snake_case : str ):
'''simple docstring'''
UpperCAmelCase_ :List[str] = ''''''
UpperCAmelCase_ :Tuple = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
UpperCAmelCase_ :int = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def a ( ):
'''simple docstring'''
UpperCAmelCase_ :List[str] = '''THE GERMAN ATTACK'''
UpperCAmelCase_ :Union[str, Any] = '''SECRET'''
UpperCAmelCase_ :str = generate_key(__snake_case, __snake_case )
UpperCAmelCase_ :List[str] = cipher_text(__snake_case, __snake_case )
print(f'Encrypted Text = {s}' )
print(f'Original Text = {original_text(__snake_case, __snake_case )}' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 608 |
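A short illustrative round trip through the three functions above (assuming they are defined or imported in the same module):

key_new = generate_key("HELLO WORLD", "KEY")     # -> "KEYKEYKEYKE", repeated to message length
encrypted = cipher_text("HELLO WORLD", key_new)
assert original_text(encrypted, key_new) == "HELLO WORLD"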
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def snake_case_ ( self : Any ):
UpperCAmelCase_ :List[Any] = tempfile.mkdtemp()
UpperCAmelCase_ :List[Any] = BlipImageProcessor()
UpperCAmelCase_ :str = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''' )
UpperCAmelCase_ :Dict = BlipProcessor(snake_case , snake_case )
processor.save_pretrained(self.tmpdirname )
def snake_case_ ( self : Dict , **snake_case : List[str] ):
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case ).tokenizer
def snake_case_ ( self : List[str] , **snake_case : Tuple ):
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case ).image_processor
def snake_case_ ( self : List[Any] ):
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self : int ):
UpperCAmelCase_ :Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCAmelCase_ :str = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case_ ( self : Optional[int] ):
UpperCAmelCase_ :Tuple = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ :Tuple = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCAmelCase_ :Union[str, Any] = self.get_image_processor(do_normalize=snake_case , padding_value=1.0 )
UpperCAmelCase_ :Tuple = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case )
def snake_case_ ( self : int ):
UpperCAmelCase_ :Tuple = self.get_image_processor()
UpperCAmelCase_ :Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ :int = BlipProcessor(tokenizer=snake_case , image_processor=snake_case )
UpperCAmelCase_ :Optional[int] = self.prepare_image_inputs()
UpperCAmelCase_ :Union[str, Any] = image_processor(snake_case , return_tensors='''np''' )
UpperCAmelCase_ :Any = processor(images=snake_case , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case_ ( self : str ):
UpperCAmelCase_ :List[str] = self.get_image_processor()
UpperCAmelCase_ :Tuple = self.get_tokenizer()
UpperCAmelCase_ :List[Any] = BlipProcessor(tokenizer=snake_case , image_processor=snake_case )
UpperCAmelCase_ :Dict = '''lower newer'''
UpperCAmelCase_ :str = processor(text=snake_case )
UpperCAmelCase_ :Any = tokenizer(snake_case , return_token_type_ids=snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case_ ( self : Any ):
UpperCAmelCase_ :Dict = self.get_image_processor()
UpperCAmelCase_ :Tuple = self.get_tokenizer()
UpperCAmelCase_ :Dict = BlipProcessor(tokenizer=snake_case , image_processor=snake_case )
UpperCAmelCase_ :Union[str, Any] = '''lower newer'''
UpperCAmelCase_ :Optional[int] = self.prepare_image_inputs()
UpperCAmelCase_ :Any = processor(text=snake_case , images=snake_case )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(snake_case ):
processor()
def snake_case_ ( self : Optional[int] ):
UpperCAmelCase_ :Union[str, Any] = self.get_image_processor()
UpperCAmelCase_ :Optional[int] = self.get_tokenizer()
UpperCAmelCase_ :int = BlipProcessor(tokenizer=snake_case , image_processor=snake_case )
UpperCAmelCase_ :Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ :Optional[Any] = processor.batch_decode(snake_case )
UpperCAmelCase_ :Dict = tokenizer.batch_decode(snake_case )
self.assertListEqual(snake_case , snake_case )
def snake_case_ ( self : str ):
UpperCAmelCase_ :str = self.get_image_processor()
UpperCAmelCase_ :Optional[Any] = self.get_tokenizer()
UpperCAmelCase_ :Dict = BlipProcessor(tokenizer=snake_case , image_processor=snake_case )
UpperCAmelCase_ :List[Any] = '''lower newer'''
UpperCAmelCase_ :List[str] = self.prepare_image_inputs()
UpperCAmelCase_ :List[str] = processor(text=snake_case , images=snake_case )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 608 | 1 |
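A hedged usage sketch of the processor pattern these tests exercise: one object wraps an image processor and a tokenizer and dispatches on whichever inputs are passed. The checkpoint name is illustrative, not taken from this file:

from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")  # illustrative checkpoint
image = Image.new("RGB", (224, 224))
inputs = processor(images=image, text="a photo of", return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']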
'''simple docstring'''
import warnings
warnings.warn(
    '''memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '''
'''`from accelerate import find_executable_batch_size` to avoid this warning.''',
FutureWarning,
)
| 707 |
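The shim above follows a common pattern: warn at import time, then re-export from the new location so old import paths keep working. A generic sketch with hypothetical module names:

import warnings

warnings.warn(
    "my_old_module has moved to my_package.my_new_module; update your imports.",
    FutureWarning,
)

# from .my_new_module import *  # noqa  (re-export so the old import path keeps working)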
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    '''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_longt5'''] = [
        '''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''LongT5EncoderModel''',
        '''LongT5ForConditionalGeneration''',
        '''LongT5Model''',
        '''LongT5PreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_flax_longt5'''] = [
        '''FlaxLongT5ForConditionalGeneration''',
        '''FlaxLongT5Model''',
        '''FlaxLongT5PreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 0 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
UpperCAmelCase_ = set()
return any(
node not in visited and depth_first_search(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
for node in graph )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
visited.add(lowerCAmelCase__ )
rec_stk.add(lowerCAmelCase__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(lowerCAmelCase__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 82 |
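An illustrative check of check_cycle on a cyclic and an acyclic directed graph:

cyclic = {0: [1], 1: [2], 2: [0]}     # 0 -> 1 -> 2 -> 0
acyclic = {0: [1, 2], 1: [2], 2: []}
assert check_cycle(cyclic) is True
assert check_cycle(acyclic) is False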
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
A : Dict = logging.get_logger(__name__)
@add_end_docstrings(a )
class __A( a ):
def __init__( self , *_snake_case , **_snake_case ) -> Optional[int]:
'''simple docstring'''
super().__init__(*_snake_case , **_snake_case )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case=None , _snake_case=None , _snake_case=None ) -> Tuple:
'''simple docstring'''
__a = {}
__a = {}
if prompt is not None:
__a = prompt
if generate_kwargs is not None:
__a = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__a = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
''' please use only one''' )
__a = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , _snake_case , **_snake_case ) -> List[Any]:
'''simple docstring'''
return super().__call__(_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=None ) -> Optional[int]:
'''simple docstring'''
__a = load_image(_snake_case )
if prompt is not None:
if not isinstance(_snake_case , _snake_case ):
raise ValueError(
F"""Received an invalid text input, got - {type(_snake_case )} - but expected a single string. """
'''Note also that one single text can be provided for conditional image to text generation.''' )
__a = self.model.config.model_type
if model_type == "git":
__a = self.image_processor(images=_snake_case , return_tensors=self.framework )
__a = self.tokenizer(text=_snake_case , add_special_tokens=_snake_case ).input_ids
__a = [self.tokenizer.cls_token_id] + input_ids
__a = torch.tensor(_snake_case ).unsqueeze(0 )
model_inputs.update({'''input_ids''': input_ids} )
elif model_type == "pix2struct":
__a = self.image_processor(images=_snake_case , header_text=_snake_case , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__a = self.image_processor(images=_snake_case , return_tensors=self.framework )
__a = self.tokenizer(_snake_case , return_tensors=self.framework )
model_inputs.update(_snake_case )
else:
raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
else:
__a = self.image_processor(images=_snake_case , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__a = None
return model_inputs
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=None ) -> str:
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs['''input_ids'''] , _snake_case )
and all(x is None for x in model_inputs['''input_ids'''] )
):
__a = None
if generate_kwargs is None:
__a = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__a = model_inputs.pop(self.model.main_input_name )
__a = self.model.generate(_snake_case , **_snake_case , **_snake_case )
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Dict:
'''simple docstring'''
__a = []
for output_ids in model_outputs:
__a = {
'''generated_text''': self.tokenizer.decode(
_snake_case , skip_special_tokens=_snake_case , )
}
records.append(_snake_case )
return records | 219 | 0 |
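A hedged usage sketch of the image-to-text pipeline implemented above; the checkpoint name is illustrative, not taken from this file:

from PIL import Image
from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")  # illustrative checkpoint
print(captioner(Image.new("RGB", (224, 224))))  # e.g. [{'generated_text': '...'}]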
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _A (__a ) -> Union[str, Any]:
"""simple docstring"""
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
def _A (__a ) -> List[str]:
"""simple docstring"""
for char in word:
SCREAMING_SNAKE_CASE_ : int = ord(SCREAMING_SNAKE_CASE__ )
if not _is_chinese_char(SCREAMING_SNAKE_CASE__ ):
return 0
return 1
def _A (__a ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = set()
for token in tokens:
SCREAMING_SNAKE_CASE_ : List[Any] = len(SCREAMING_SNAKE_CASE__ ) > 1 and is_chinese(SCREAMING_SNAKE_CASE__ )
if chinese_word:
word_set.add(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = list(SCREAMING_SNAKE_CASE__ )
return word_list
def _A (__a , __a ) -> Optional[Any]:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
SCREAMING_SNAKE_CASE_ : Dict = max([len(SCREAMING_SNAKE_CASE__ ) for w in chinese_word_set] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = bert_tokens
SCREAMING_SNAKE_CASE_ : int = 0, len(SCREAMING_SNAKE_CASE__ )
while start < end:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
if is_chinese(bert_word[start] ):
SCREAMING_SNAKE_CASE_ : Dict = min(end - start , SCREAMING_SNAKE_CASE__ )
for i in range(SCREAMING_SNAKE_CASE__ , 1 , -1 ):
SCREAMING_SNAKE_CASE_ : str = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
SCREAMING_SNAKE_CASE_ : List[str] = """##""" + bert_word[j]
SCREAMING_SNAKE_CASE_ : Tuple = start + i
SCREAMING_SNAKE_CASE_ : str = False
break
if single_word:
start += 1
return bert_word
def _A (__a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for i in range(0 , len(SCREAMING_SNAKE_CASE__ ) , 1_00 ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ltp_tokenizer.pipeline(lines[i : i + 1_00] , tasks=['''cws'''] ).cws
SCREAMING_SNAKE_CASE_ : List[Any] = [get_chinese_word(SCREAMING_SNAKE_CASE__ ) for r in res]
ltp_res.extend(SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE_ : Dict = []
for i in range(0 , len(SCREAMING_SNAKE_CASE__ ) , 1_00 ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=5_12 )
bert_res.extend(res['''input_ids'''] )
assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE_ : Dict = []
for input_ids, chinese_word in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for id in input_ids:
SCREAMING_SNAKE_CASE_ : Optional[int] = bert_tokenizer._convert_id_to_token(SCREAMING_SNAKE_CASE__ )
input_tokens.append(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = add_sub_symbol(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE_ : str = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(SCREAMING_SNAKE_CASE__ ):
if token[:2] == "##":
SCREAMING_SNAKE_CASE_ : str = token[2:]
# save chinese tokens' pos
if len(SCREAMING_SNAKE_CASE__ ) == 1 and _is_chinese_char(ord(SCREAMING_SNAKE_CASE__ ) ):
ref_id.append(SCREAMING_SNAKE_CASE__ )
ref_ids.append(SCREAMING_SNAKE_CASE__ )
assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )
return ref_ids
def _A (__a ) -> Dict:
"""simple docstring"""
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = f.readlines()
SCREAMING_SNAKE_CASE_ : Optional[Any] = [line.strip() for line in data if len(SCREAMING_SNAKE_CASE__ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
SCREAMING_SNAKE_CASE_ : Tuple = LTP(args.ltp ) # faster in GPU device
SCREAMING_SNAKE_CASE_ : Optional[Any] = BertTokenizer.from_pretrained(args.bert )
SCREAMING_SNAKE_CASE_ : Optional[int] = prepare_ref(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [json.dumps(SCREAMING_SNAKE_CASE__ ) + """\n""" for ref in ref_ids]
f.writelines(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
UpperCAmelCase_ : Dict = parser.parse_args()
main(args)
| 704 |
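The "##"-relabeling step above is easiest to see on a tiny example. This is a self-contained re-implementation of the idea (the helper names in this dump are mangled), not the exact original code:

def mark_subwords(bert_tokens, chinese_words):
    # Prefix later characters of any matched segmenter word with "##".
    out = list(bert_tokens)
    max_len = max(len(w) for w in chinese_words)
    start = 0
    while start < len(out):
        for i in range(min(len(out) - start, max_len), 1, -1):
            if "".join(out[start : start + i]) in chinese_words:
                for j in range(start + 1, start + i):
                    out[j] = "##" + out[j]
                start += i
                break
        else:
            start += 1
    return out

print(mark_subwords(["我", "喜", "欢", "北", "京"], {"喜欢", "北京"}))
# ['我', '喜', '##欢', '北', '##京']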
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , *lowercase_ : Any , **lowercase_ : Any):
'''simple docstring'''
super().__init__(*lowercase_ , **lowercase_)
requires_backends(self , '''vision''')
self.check_model_type(lowercase_)
def __call__( self : int , lowercase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowercase_ : Optional[Any]):
'''simple docstring'''
return super().__call__(lowercase_ , **lowercase_)
def _SCREAMING_SNAKE_CASE ( self : List[str] , **lowercase_ : List[str]):
'''simple docstring'''
return {}, {}, {}
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = load_image(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = image.size
SCREAMING_SNAKE_CASE_ : int = self.image_processor(images=lowercase_ , return_tensors=self.framework)
return model_inputs
def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = self.model(**lowercase_)
return model_outputs
def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_outputs.predicted_depth
SCREAMING_SNAKE_CASE_ : List[Any] = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = prediction.squeeze().cpu().numpy()
SCREAMING_SNAKE_CASE_ : List[str] = (output * 255 / np.max(lowercase_)).astype('''uint8''')
SCREAMING_SNAKE_CASE_ : List[Any] = Image.fromarray(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = predicted_depth
SCREAMING_SNAKE_CASE_ : Optional[int] = depth
return output_dict
| 176 | 0 |
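A hedged usage sketch of the depth-estimation pipeline above; the checkpoint is illustrative:

from PIL import Image
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")  # illustrative checkpoint
result = depth_estimator(Image.new("RGB", (384, 384)))
result["depth"].save("depth.png")       # PIL image assembled in postprocess above
print(result["predicted_depth"].shape)  # raw depth tensor before rescaling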
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''feature_extraction_yolos'''] = ['''YolosFeatureExtractor''']
    _import_structure['''image_processing_yolos'''] = ['''YolosImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_yolos'''] = [
        '''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''YolosForObjectDetection''',
        '''YolosModel''',
        '''YolosPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 660 | '''simple docstring'''
import argparse

import torch

from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging

logging.set_verbosity_info()


def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 660 | 1 |
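An example invocation of the conversion script; the script filename and paths are hypothetical, but the flags match the parser defined above:

python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
    --gpt2_checkpoint_path /path/to/tf_checkpoint/model.ckpt \
    --pytorch_dump_folder_path /path/to/output_dir \
    --gpt2_config_file /path/to/config.json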
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). " , SCREAMING_SNAKE_CASE , )
class __a ( SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = RobertaConfig
SCREAMING_SNAKE_CASE = "roberta"
def __init__( self : int , snake_case_ : str)-> Optional[Any]:
super().__init__(snake_case_)
__lowerCAmelCase =RobertaEmbeddings(snake_case_)
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE , )
class __a ( SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE = RobertaConfig
SCREAMING_SNAKE_CASE = "roberta"
def __init__( self : int , snake_case_ : Dict)-> Any:
super().__init__(snake_case_)
__lowerCAmelCase =config.num_labels
__lowerCAmelCase =config.num_hidden_layers
__lowerCAmelCase =DeeRobertaModel(snake_case_)
__lowerCAmelCase =nn.Dropout(config.hidden_dropout_prob)
__lowerCAmelCase =nn.Linear(config.hidden_size , self.config.num_labels)
@add_start_docstrings_to_model_forward(snake_case_)
def UpperCamelCase ( self : int , snake_case_ : Any=None , snake_case_ : Tuple=None , snake_case_ : int=None , snake_case_ : List[Any]=None , snake_case_ : List[Any]=None , snake_case_ : Tuple=None , snake_case_ : Dict=None , snake_case_ : Dict=-1 , snake_case_ : Any=False , )-> Optional[Any]:
__lowerCAmelCase =self.num_layers
try:
__lowerCAmelCase =self.roberta(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , position_ids=snake_case_ , head_mask=snake_case_ , inputs_embeds=snake_case_ , )
__lowerCAmelCase =outputs[1]
__lowerCAmelCase =self.dropout(snake_case_)
__lowerCAmelCase =self.classifier(snake_case_)
__lowerCAmelCase =(logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__lowerCAmelCase =e.message
__lowerCAmelCase =e.exit_layer
__lowerCAmelCase =outputs[0]
if not self.training:
__lowerCAmelCase =entropy(snake_case_)
__lowerCAmelCase =[]
__lowerCAmelCase =[]
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__lowerCAmelCase =MSELoss()
__lowerCAmelCase =loss_fct(logits.view(-1) , labels.view(-1))
else:
__lowerCAmelCase =CrossEntropyLoss()
__lowerCAmelCase =loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
# work with highway exits
__lowerCAmelCase =[]
for highway_exit in outputs[-1]:
__lowerCAmelCase =highway_exit[0]
if not self.training:
highway_logits_all.append(snake_case_)
highway_entropy.append(highway_exit[2])
if self.num_labels == 1:
# We are doing regression
__lowerCAmelCase =MSELoss()
__lowerCAmelCase =loss_fct(highway_logits.view(-1) , labels.view(-1))
else:
__lowerCAmelCase =CrossEntropyLoss()
__lowerCAmelCase =loss_fct(highway_logits.view(-1 , self.num_labels) , labels.view(-1))
highway_losses.append(snake_case_)
if train_highway:
__lowerCAmelCase =(sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
__lowerCAmelCase =(loss,) + outputs
if not self.training:
__lowerCAmelCase =outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__lowerCAmelCase =(
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 456 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image

if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
    PIL_INTERPOLATION = {
        '''linear''': PIL.Image.Resampling.BILINEAR,
        '''bilinear''': PIL.Image.Resampling.BILINEAR,
        '''bicubic''': PIL.Image.Resampling.BICUBIC,
        '''lanczos''': PIL.Image.Resampling.LANCZOS,
        '''nearest''': PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        '''linear''': PIL.Image.LINEAR,
        '''bilinear''': PIL.Image.BILINEAR,
        '''bicubic''': PIL.Image.BICUBIC,
        '''lanczos''': PIL.Image.LANCZOS,
        '''nearest''': PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    # Map tensors from [-1, 1] to [0, 1], move channels last, and convert to PIL
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
| 456 | 1 |
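An illustrative round trip through the helpers above (assumes torch is installed):

import torch

batch = torch.rand(2, 3, 64, 64) * 2 - 1   # NCHW tensors in [-1, 1]
pil_images = pt_to_pil(batch)              # two 64x64 RGB PIL images
thumb = pil_images[0].resize((128, 128), PIL_INTERPOLATION["lanczos"])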
"""Compute the rank of a matrix by Gaussian elimination."""


def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 527 |
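A quick sanity check of rank_of_matrix on a singular 3x3 matrix whose third row is the sum of the first two:

matrix = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [5.0, 7.0, 9.0]]  # row3 = row1 + row2
assert rank_of_matrix(matrix) == 2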
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class lowerCamelCase_ ( unittest.TestCase ):
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=lowerCAmelCase__ , )
assert hasattr(self , '''env''' )
def __lowercase ( self : Optional[int] , lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
# configuration for running training on smdistributed Model Parallel
SCREAMING_SNAKE_CASE : Tuple = {
'''enabled''': True,
'''processes_per_host''': 8,
}
SCREAMING_SNAKE_CASE : Optional[int] = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
SCREAMING_SNAKE_CASE : str = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
SCREAMING_SNAKE_CASE : Optional[Any] = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=lowerCAmelCase__ , instance_type=self.instance_type , debugger_hook_config=lowerCAmelCase__ , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 5_00,
} , metric_definitions=self.env.metric_definitions , distribution=lowerCAmelCase__ , py_version='''py36''' , )
def __lowercase ( self : Dict , lowerCAmelCase__ : int ):
"""simple docstring"""
TrainingJobAnalytics(lowerCAmelCase__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __lowercase ( self : Dict , lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
# create estimator
SCREAMING_SNAKE_CASE : Any = self.create_estimator(lowerCAmelCase__ )
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE : int = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
SCREAMING_SNAKE_CASE : Tuple = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE : int = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , lowerCAmelCase__ )
| 527 | 1 |
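For reference, this is the shape of the distribution dict the estimator above consumes, mirroring the smp_options/mpi_options values in the test:

distribution = {
    "smdistributed": {
        "modelparallel": {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
    },
    "mpi": {"enabled": True, "processes_per_host": 8},
}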
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self : int , A_ : int , A_ : int , A_ : int , A_ : int=0.0 , A_ : Optional[int] = None , A_ : str = "geglu" , A_ : Optional[int] = None , A_ : bool = False , A_ : bool = False , A_ : bool = False , A_ : bool = False , A_ : bool = True , A_ : str = "layer_norm" , A_ : bool = False , ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Tuple = only_cross_attention
_lowerCAmelCase : str = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
_lowerCAmelCase : List[Any] = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
_lowerCAmelCase : List[Any] = AdaLayerNorm(A_ , A_ )
elif self.use_ada_layer_norm_zero:
_lowerCAmelCase : List[Any] = AdaLayerNormZero(A_ , A_ )
else:
_lowerCAmelCase : Dict = nn.LayerNorm(A_ , elementwise_affine=A_ )
_lowerCAmelCase : Dict = Attention(
query_dim=A_ , heads=A_ , dim_head=A_ , dropout=A_ , bias=A_ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=A_ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
_lowerCAmelCase : List[str] = (
AdaLayerNorm(A_ , A_ )
if self.use_ada_layer_norm
else nn.LayerNorm(A_ , elementwise_affine=A_ )
)
_lowerCAmelCase : int = Attention(
query_dim=A_ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=A_ , dim_head=A_ , dropout=A_ , bias=A_ , upcast_attention=A_ , ) # is self-attn if encoder_hidden_states is none
else:
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : Optional[int] = None
# 3. Feed-forward
_lowerCAmelCase : str = nn.LayerNorm(A_ , elementwise_affine=A_ )
_lowerCAmelCase : List[str] = FeedForward(A_ , dropout=A_ , activation_fn=A_ , final_dropout=A_ )
# let chunk size default to None
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : int = 0
def __magic_name__ ( self : Optional[Any] , A_ : Optional[int] , A_ : int ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = chunk_size
_lowerCAmelCase : Dict = dim
def __magic_name__ ( self : Optional[int] , A_ : torch.FloatTensor , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[torch.FloatTensor] = None , A_ : Optional[torch.LongTensor] = None , A_ : Dict[str, Any] = None , A_ : Optional[torch.LongTensor] = None , ):
'''simple docstring'''
if self.use_ada_layer_norm:
_lowerCAmelCase : List[Any] = self.norma(A_ , A_ )
elif self.use_ada_layer_norm_zero:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.norma(
A_ , A_ , A_ , hidden_dtype=hidden_states.dtype )
else:
_lowerCAmelCase : str = self.norma(A_ )
_lowerCAmelCase : List[Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
_lowerCAmelCase : Optional[Any] = self.attna(
A_ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=A_ , **A_ , )
if self.use_ada_layer_norm_zero:
_lowerCAmelCase : Dict = gate_msa.unsqueeze(1 ) * attn_output
_lowerCAmelCase : Optional[Any] = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
_lowerCAmelCase : List[str] = (
self.norma(A_ , A_ ) if self.use_ada_layer_norm else self.norma(A_ )
)
_lowerCAmelCase : Tuple = self.attna(
A_ , encoder_hidden_states=A_ , attention_mask=A_ , **A_ , )
_lowerCAmelCase : List[str] = attn_output + hidden_states
# 3. Feed-forward
_lowerCAmelCase : Union[str, Any] = self.norma(A_ )
if self.use_ada_layer_norm_zero:
_lowerCAmelCase : Any = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
_lowerCAmelCase : Optional[Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
_lowerCAmelCase : Any = torch.cat(
[self.ff(A_ ) for hid_slice in norm_hidden_states.chunk(A_ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
_lowerCAmelCase : Dict = self.ff(A_ )
if self.use_ada_layer_norm_zero:
_lowerCAmelCase : Dict = gate_mlp.unsqueeze(1 ) * ff_output
_lowerCAmelCase : Dict = ff_output + hidden_states
return hidden_states
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , A_ : int , A_ : Optional[int] = None , A_ : int = 4 , A_ : float = 0.0 , A_ : str = "geglu" , A_ : bool = False , ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[Any] = int(dim * mult )
_lowerCAmelCase : Union[str, Any] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
_lowerCAmelCase : Optional[Any] = GELU(A_ , A_ )
if activation_fn == "gelu-approximate":
_lowerCAmelCase : List[str] = GELU(A_ , A_ , approximate="tanh" )
elif activation_fn == "geglu":
_lowerCAmelCase : List[Any] = GEGLU(A_ , A_ )
elif activation_fn == "geglu-approximate":
_lowerCAmelCase : int = ApproximateGELU(A_ , A_ )
_lowerCAmelCase : Union[str, Any] = nn.ModuleList([] )
# project in
self.net.append(A_ )
# project dropout
self.net.append(nn.Dropout(A_ ) )
# project out
self.net.append(nn.Linear(A_ , A_ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(A_ ) )
def __magic_name__ ( self : Optional[Any] , A_ : Dict ):
'''simple docstring'''
for module in self.net:
_lowerCAmelCase : List[Any] = module(A_ )
return hidden_states
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self : int , A_ : int , A_ : int , A_ : str = "none" ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Linear(A_ , A_ )
_lowerCAmelCase : List[str] = approximate
def __magic_name__ ( self : List[Any] , A_ : Any ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(A_ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def __magic_name__ ( self : int , A_ : int ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.proj(A_ )
_lowerCAmelCase : Dict = self.gelu(A_ )
return hidden_states
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , A_ : int , A_ : int ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Tuple = nn.Linear(A_ , dim_out * 2 )
def __magic_name__ ( self : Any , A_ : Dict ):
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(A_ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def __magic_name__ ( self : str , A_ : Tuple ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[str] = self.proj(A_ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(A_ )
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] , A_ : int , A_ : int ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Optional[int] = nn.Linear(A_ , A_ )
def __magic_name__ ( self : Dict , A_ : List[Any] ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.proj(A_ )
return x * torch.sigmoid(1.702 * x )
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict , A_ : List[str] , A_ : Optional[Any] ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[Any] = nn.Embedding(A_ , A_ )
_lowerCAmelCase : Dict = nn.SiLU()
_lowerCAmelCase : int = nn.Linear(A_ , embedding_dim * 2 )
_lowerCAmelCase : Optional[int] = nn.LayerNorm(A_ , elementwise_affine=A_ )
def __magic_name__ ( self : Tuple , A_ : List[Any] , A_ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase : Any = self.linear(self.silu(self.emb(A_ ) ) )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = torch.chunk(A_ , 2 )
_lowerCAmelCase : Optional[Any] = self.norm(A_ ) * (1 + scale) + shift
return x
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , A_ : str , A_ : str ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = CombinedTimestepLabelEmbeddings(A_ , A_ )
_lowerCAmelCase : Optional[Any] = nn.SiLU()
_lowerCAmelCase : Optional[int] = nn.Linear(A_ , 6 * embedding_dim , bias=A_ )
_lowerCAmelCase : List[Any] = nn.LayerNorm(A_ , elementwise_affine=A_ , eps=1E-6 )
def __magic_name__ ( self : str , A_ : Any , A_ : Dict , A_ : int , A_ : List[str]=None ):
'''simple docstring'''
_lowerCAmelCase : int = self.linear(self.silu(self.emb(A_ , A_ , hidden_dtype=A_ ) ) )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = emb.chunk(6 , dim=1 )
_lowerCAmelCase : Any = self.norm(A_ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self : int , A_ : int , A_ : int , A_ : int , A_ : Optional[str] = None , A_ : float = 1E-5 ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : List[Any] = num_groups
_lowerCAmelCase : str = eps
if act_fn is None:
_lowerCAmelCase : str = None
else:
_lowerCAmelCase : List[str] = get_activation(A_ )
_lowerCAmelCase : Dict = nn.Linear(A_ , out_dim * 2 )
def __magic_name__ ( self : Dict , A_ : Any , A_ : Tuple ):
'''simple docstring'''
if self.act:
_lowerCAmelCase : str = self.act(A_ )
_lowerCAmelCase : Dict = self.linear(A_ )
_lowerCAmelCase : str = emb[:, :, None, None]
_lowerCAmelCase , _lowerCAmelCase : List[str] = emb.chunk(2 , dim=1 )
_lowerCAmelCase : Dict = F.group_norm(A_ , self.num_groups , eps=self.eps )
_lowerCAmelCase : Optional[Any] = x * (1 + scale) + shift
return x
| 503 |
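A self-contained sketch of the chunked feed-forward trick used in the block above: the sequence axis is split into chunks so the feed-forward activations are materialized piecewise, trading a little speed for lower peak memory:

import torch

def chunked_feed_forward(ff, hidden_states, chunk_size, chunk_dim=1):
    # Process the sequence dimension chunk by chunk and re-concatenate.
    if hidden_states.shape[chunk_dim] % chunk_size != 0:
        raise ValueError("sequence length must be divisible by chunk_size")
    num_chunks = hidden_states.shape[chunk_dim] // chunk_size
    return torch.cat(
        [ff(chunk) for chunk in hidden_states.chunk(num_chunks, dim=chunk_dim)],
        dim=chunk_dim,
    )

out = chunked_feed_forward(torch.nn.Linear(8, 8), torch.randn(2, 16, 8), chunk_size=4)
assert out.shape == (2, 16, 8)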
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for one input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the filter's gain (dB) over frequency from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the filter's phase shift over frequency from its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phase, -2 * pi))
    plt.show()
| 503 | 1 |
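An illustrative call with a trivial pass-through filter (the plot calls are commented out so the snippet runs headless):

class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample

# show_frequency_response(IdentityFilter(), 48000)  # flat 0 dB response
# show_phase_response(IdentityFilter(), 48000)      # zero phase shift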
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {'vocab_file': 'sentencepiece.bpe.model'}
snake_case_ = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
}
}
snake_case_ = {
'camembert-base': 5_1_2,
}
snake_case_ = '▁'
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs=None,
        **kwargs,
    ):
        # mask_token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>).
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
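A standalone illustration of the fairseq/sentencepiece id offset implemented above (a sketch, not part of the original file; it assumes `fairseq_offset == 4`, the value set in `__init__`):

def sp_id_to_tokenizer_id(sp_piece_id, fairseq_offset=4):
    # sentencepiece reserves id 0 for its own <unk>; the tokenizer above remaps it to the
    # fairseq <unk> id 3 and shifts every other piece past the four fairseq control tokens
    return 3 if sp_piece_id == 0 else sp_piece_id + fairseq_offset


assert sp_id_to_tokenizer_id(0) == 3
assert sp_id_to_tokenizer_id(10) == 14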
| 421 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
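A typical invocation might look like the following; the script filename and all paths here are illustrative assumptions, not taken from the original:

# python convert_original_stable_diffusion_to_diffusers.py \
#     --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#     --original_config_file ./v1-inference.yaml \
#     --scheduler_type ddim \
#     --extract_ema \
#     --dump_path ./converted-pipeline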
| 421 | 1 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number)) + 1, 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
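A quick worked example of the two helpers above (a sketch, not part of the original file):

if __name__ == "__main__":
    assert is_prime(13) and not is_prime(15)
    # 14 is composite, so the search walks 14 -> 15 -> 16 -> 17
    assert next_prime(14) == 17
    # 13 is already prime, so the function recurses past it to the next prime
    assert next_prime(13) == 17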
| 528 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implement
        pass


def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
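The `encoder_seq_length` arithmetic in the tester above pads the sequence length up to the next multiple of the attention window; a standalone check with illustrative values (not part of the original file):

seq_length, attention_window = 7, 4
padded = seq_length + (attention_window - seq_length % attention_window) % attention_window
assert padded == 8  # 7 is padded up to the next multiple of 4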
| 528 | 1 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
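Outside the tests, `find_executable_batch_size` is meant to wrap a real training entry point; a minimal sketch of that pattern (the function body is assumed, not taken from the original):

# @find_executable_batch_size(starting_batch_size=128)
# def training_function(batch_size):
#     # build the dataloaders/model for `batch_size`; on a CUDA OOM the decorator
#     # frees memory, halves `batch_size`, and calls the function again
#     ...
#
# training_function()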
| 48 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 74 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""bert-base-uncased""": 512,
"""bert-large-uncased""": 512,
"""bert-base-cased""": 512,
"""bert-large-cased""": 512,
"""bert-base-multilingual-uncased""": 512,
"""bert-base-multilingual-cased""": 512,
"""bert-base-chinese""": 512,
"""bert-base-german-cased""": 512,
"""bert-large-uncased-whole-word-masking""": 512,
"""bert-large-cased-whole-word-masking""": 512,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 512,
"""bert-base-cased-finetuned-mrpc""": 512,
"""bert-base-german-dbmdz-cased""": 512,
"""bert-base-german-dbmdz-uncased""": 512,
"""TurkuNLP/bert-base-finnish-cased-v1""": 512,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 512,
"""wietsedv/bert-base-dutch-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
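A worked example of the segment-id layout produced by `create_token_type_ids_from_sequences` above (the ids are placeholders, not real vocab entries; this sketch is not part of the original file):

token_ids_0 = [7, 8]  # first sequence, already converted to ids
token_ids_1 = [9]     # second sequence
type_ids = [0] * (1 + len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 1)
assert type_ids == [0, 0, 0, 0, 1, 1]  # [CLS] s1 [SEP] -> 0s, then s2 [SEP] -> 1s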
| 481 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
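The `_LazyModule` indirection above defers the heavy torch/sentencepiece imports until an attribute is first accessed; a usage sketch (illustrative only, not part of the original file):

# import transformers.models.speecht5 as speecht5  # cheap: no torch import happens yet
# config = speecht5.SpeechT5Config()               # first attribute access triggers the real import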
| 481 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 255 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith('module.encoder'):
            key = key.replace('module.encoder', 'glpn.encoder')
        if key.startswith('module.decoder'):
            key = key.replace('module.decoder', 'decoder.stages')
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed') + len('patch_embed')]
            key = key.replace(f'patch_embed{idx}', f'patch_embeddings.{int(idx)-1}')
        if "norm" in key:
            key = key.replace('norm', 'layer_norm')
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('glpn.encoder.layer_norm') + len('glpn.encoder.layer_norm')]
            key = key.replace(f'layer_norm{idx}', f'layer_norm.{int(idx)-1}')
        if "layer_norm1" in key:
            key = key.replace('layer_norm1', 'layer_norm_1')
        if "layer_norm2" in key:
            key = key.replace('layer_norm2', 'layer_norm_2')
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block') + len('block')]
            key = key.replace(f'block{idx}', f'block.{int(idx)-1}')
        if "attn.q" in key:
            key = key.replace('attn.q', 'attention.self.query')
        if "attn.proj" in key:
            key = key.replace('attn.proj', 'attention.output.dense')
        if "attn" in key:
            key = key.replace('attn', 'attention.self')
        if "fc1" in key:
            key = key.replace('fc1', 'dense1')
        if "fc2" in key:
            key = key.replace('fc2', 'dense2')
        if "linear_pred" in key:
            key = key.replace('linear_pred', 'classifier')
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv', 'linear_fuse')
            key = key.replace('linear_fuse.bn', 'batch_norm')
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c') + len('linear_c')]
            key = key.replace(f'linear_c{idx}', f'linear_c.{int(idx)-1}')
        if "bot_conv" in key:
            key = key.replace('bot_conv', '0.convolution')
        if "skip_conv1" in key:
            key = key.replace('skip_conv1', '1.convolution')
        if "skip_conv2" in key:
            key = key.replace('skip_conv2', '2.convolution')
        if "fusion1" in key:
            key = key.replace('fusion1', '1.fusion')
        if "fusion2" in key:
            key = key.replace('fusion2', '2.fusion')
        if "fusion3" in key:
            key = key.replace('fusion3', '3.fusion')
        if "fusion" in key and "conv" in key:
            key = key.replace('conv', 'convolutional_layer')
        if key.startswith('module.last_layer_depth'):
            key = key.replace('module.last_layer_depth', 'head.head')
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.weight')
            kv_bias = state_dict.pop(f'glpn.encoder.block.{i}.{j}.attention.self.kv.bias')
            # next, add keys and values (in that order) to the state dict
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f'glpn.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values
    logger.info('Converting model...')
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]])
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]])
        else:
            raise ValueError(f'Unknown model name: {model_name}')
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print('Looks ok!')
    # finally, push to hub if required
    if push_to_hub:
        logger.info('Pushing model and image processor to the hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model', use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add image processor', use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''',
default=None,
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
parser.add_argument(
'''--model_name''',
default='''glpn-kitti''',
type=str,
help='''Name of the model in case you\'re pushing to the hub.''',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
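An example invocation; the script and checkpoint filenames below are illustrative assumptions, not taken from the original:

# python convert_glpn_to_pytorch.py \
#     --checkpoint_path ./glpn_kitti.pth \
#     --pytorch_dump_folder_path ./glpn-kitti \
#     --model_name glpn-kitti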
| 573 | 0 |
"""simple docstring"""
def bfs(graph, source, sink, parent):
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
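Because the augmenting path search here is breadth-first, this is the Edmonds-Karp variant of Ford-Fulkerson, with O(V * E^2) worst-case time; for the classic six-node CLRS network above, the printed maximum flow is 23.

# Note: ford_fulkerson mutates `graph` into its residual network, so calling it again on the
# same object returns 0; pass a fresh copy, e.g. ford_fulkerson([row[:] for row in graph], 0, 5).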
| 194 |
"""simple docstring"""
__author__ = 'Tobias Carryer'
from time import time
class LinearCongruentialGenerator:
    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
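The demo's multiplier/increment pair (1664525, 1013904223) with modulus 2 << 31 == 2**32 matches the well-known Numerical Recipes LCG constants; a deterministic check, left as comments since the demo loop above never terminates:

# With seed 0 the first output is (1664525 * 0 + 1013904223) % 2**32 == 1013904223:
# demo = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=0)
# assert demo.next_number() == 1013904223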
| 194 | 1 |
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : str = ""
a__ : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
a__ : str = None # compression type in fsspec. ex: "gzip"
a__ : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : Optional[int] , _lowercase : str = "" , _lowercase : Optional[str] = None , _lowercase : Optional[dict] = None , **_lowercase : Dict ):
super().__init__(self , **_lowercase )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
__UpperCAmelCase = fsspec.open(
_lowercase , mode='''rb''' , protocol=_lowercase , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
__UpperCAmelCase = os.path.basename(self.file.path.split('''::''' )[0] )
__UpperCAmelCase = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
__UpperCAmelCase = None
@classmethod
def a ( cls : Tuple , _lowercase : Any ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(_lowercase ).lstrip('''/''' )
def a ( self : Dict ):
if self.dir_cache is None:
__UpperCAmelCase = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
__UpperCAmelCase = {f['''name''']: f}
def a ( self : Optional[int] , _lowercase : str ):
return self.file.open().read()
def a ( self : Optional[Any] , _lowercase : str , _lowercase : str = "rb" , _lowercase : Optional[Any]=None , _lowercase : Any=True , _lowercase : Tuple=None , **_lowercase : Optional[Any] , ):
__UpperCAmelCase = self._strip_protocol(_lowercase )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Dict = "bz2"
a__ : List[str] = "bz2"
a__ : List[str] = ".bz2"
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Dict = "gzip"
a__ : Tuple = "gzip"
a__ : Optional[int] = ".gz"
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : int = "lz4"
a__ : List[str] = "lz4"
a__ : Union[str, Any] = ".lz4"
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Union[str, Any] = "xz"
a__ : Optional[Any] = "xz"
a__ : List[str] = ".xz"
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Optional[Any] = "zstd"
a__ : Any = "zstd"
a__ : Union[str, Any] = ".zst"
def __init__( self : Optional[Any] , _lowercase : str , _lowercase : str = "rb" , _lowercase : Optional[str] = None , _lowercase : Optional[dict] = None , _lowercase : int = DEFAULT_BLOCK_SIZE , **_lowercase : Any , ):
super().__init__(
fo=_lowercase , mode=_lowercase , target_protocol=_lowercase , target_options=_lowercase , block_size=_lowercase , **_lowercase , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__
        class WrappedFile:
            def __init__( self , file_ ):
                self._file = file_
            def __enter__( self ):
                self._file.__enter__()
                return self
            def __exit__( self , *args , **kwargs ):
                self._file.__exit__(*args , **kwargs )
            def __iter__( self ):
                return iter(self._file )
            def __next__( self ):
                return next(self._file )
            def __getattr__( self , attr ):
                return getattr(self._file , attr )
        def fixed_enter(*args , **kwargs ):
            return WrappedFile(_enter(*args , **kwargs ) )
        self.file.__enter__ = fixed_enter
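# Hedged usage sketch for the compression filesystems above (assumption: the
# subclasses' protocols have been registered with fsspec, which the datasets
# library does on import; file name and chained-URL form are illustrative).
import gzip
import fsspec

with gzip.open("data.txt.gz", "wt") as f:  # hypothetical local file
    f.write("hello")
# The compression layer wraps the local file; the archive exposes exactly one
# member, named after the stripped extension ("data.txt" here).
with fsspec.open("gzip://data.txt::data.txt.gz", mode="rb") as f:
    print(f.read())  # b"hello"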
| 49 |
'''simple docstring'''
A_ = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
A_ = frozenset(["prompt", "negative_prompt"])
A_ = frozenset([])
A_ = frozenset(["image"])
A_ = frozenset(
[
"image",
"height",
"width",
"guidance_scale",
]
)
A_ = frozenset(["image"])
A_ = frozenset(
[
"prompt",
"image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
A_ = frozenset(["prompt", "image", "negative_prompt"])
A_ = frozenset(
[
# Text guided image variation with an image mask
"prompt",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
A_ = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
A_ = frozenset(
[
# image variation with an image mask
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
A_ = frozenset(["image", "mask_image"])
A_ = frozenset(
[
"example_image",
"image",
"mask_image",
"height",
"width",
"guidance_scale",
]
)
A_ = frozenset(["example_image", "image", "mask_image"])
A_ = frozenset(["class_labels"])
A_ = frozenset(["class_labels"])
A_ = frozenset(["batch_size"])
A_ = frozenset([])
A_ = frozenset(["batch_size"])
A_ = frozenset([])
A_ = frozenset(
[
"prompt",
"audio_length_in_s",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
"cross_attention_kwargs",
]
)
A_ = frozenset(["prompt", "negative_prompt"])
A_ = frozenset(["input_tokens"])
A_ = frozenset(["input_tokens"])
| 143 | 0 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream ( AbstractDatasetInputStream ):
    def __init__( self , generator : Callable , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , gen_kwargs : Optional[dict] = None , num_proc : Optional[int] = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.builder = Generator(
            cache_dir=cache_dir , features=features , generator=generator , gen_kwargs=gen_kwargs , **kwargs , )
    def read( self ):
        '''simple docstring'''
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train' )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split='train' , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
| 718 |
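# Hedged usage sketch for the generator-backed input stream in the sample
# above: in the public datasets API the same code path is reached through
# Dataset.from_generator (an assumption based on the Generator builder import).
from datasets import Dataset

def gen():
    for i in range(3):
        yield {"id": i, "text": f"example {i}"}

ds = Dataset.from_generator(gen)
print(ds[0])  # {'id': 0, 'text': 'example 0'}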
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
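# A simplified, self-contained illustration of the lazy-import idea behind
# _LazyModule above (the real transformers implementation is richer): defer
# importing a module until one of its attributes is first touched.
import importlib

class LazyLoader:
    def __init__(self, module_name: str):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, attr):
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, attr)

lazy_json = LazyLoader("json")    # nothing imported yet
print(lazy_json.dumps({"a": 1}))  # the json module is imported only here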
| 273 | 0 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : Optional[int] = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class a (PretrainedConfig ):
    """simple docstring"""
    model_type = "segformer"
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1E-6 , decoder_hidden_size=256 , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True." , FutureWarning , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage" , True )
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class a (OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11" )
@property
def __snake_case ( self : Any ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __snake_case ( self : Optional[int] ) -> float:
return 1E-4
@property
def __snake_case ( self : int ) -> int:
return 12
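# Hedged usage sketch for the SegFormer config above (the class ships in
# transformers as SegformerConfig; instantiating a model from a config uses
# random weights, so nothing is downloaded):
from transformers import SegformerConfig, SegformerForSemanticSegmentation

config = SegformerConfig(num_labels=19)  # e.g. a Cityscapes-style label count
model = SegformerForSemanticSegmentation(config)
print(config.hidden_sizes)  # [32, 64, 160, 256], the defaults above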
| 81 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict( checkpoint_path ):
    sd = torch.load(checkpoint_path, map_location="cpu" )
    return sd
def get_new_dict( d, config, rename_keys_prefix=rename_keys_prefix ):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Optional[Any]:
assert (
checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
__UpperCAmelCase : int = "pretraining"
if "vcr" in checkpoint_path:
__UpperCAmelCase : Optional[Any] = {"visual_embedding_dim": 512}
elif "vqa_advanced" in checkpoint_path:
__UpperCAmelCase : int = {"visual_embedding_dim": 2048}
elif "vqa" in checkpoint_path:
__UpperCAmelCase : Tuple = {"visual_embedding_dim": 2048}
elif "nlvr" in checkpoint_path:
__UpperCAmelCase : Optional[Any] = {"visual_embedding_dim": 1024}
else:
raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
__UpperCAmelCase : Optional[int] = {"visual_embedding_dim": 512}
__UpperCAmelCase : List[str] = "multichoice"
elif "vqa_advanced" in checkpoint_path:
__UpperCAmelCase : Optional[int] = {"visual_embedding_dim": 2048}
__UpperCAmelCase : str = "vqa_advanced"
elif "vqa" in checkpoint_path:
__UpperCAmelCase : str = {"visual_embedding_dim": 2048, "num_labels": 3129}
__UpperCAmelCase : Union[str, Any] = "vqa"
elif "nlvr" in checkpoint_path:
__UpperCAmelCase : str = {
"visual_embedding_dim": 1024,
"num_labels": 2,
}
__UpperCAmelCase : Optional[int] = "nlvr"
__UpperCAmelCase : Optional[int] = VisualBertConfig(**snake_case__ )
# Load State Dict
__UpperCAmelCase : str = load_state_dict(snake_case__ )
__UpperCAmelCase : int = get_new_dict(snake_case__, snake_case__ )
if model_type == "pretraining":
__UpperCAmelCase : Union[str, Any] = VisualBertForPreTraining(snake_case__ )
elif model_type == "vqa":
__UpperCAmelCase : Union[str, Any] = VisualBertForQuestionAnswering(snake_case__ )
elif model_type == "nlvr":
__UpperCAmelCase : str = VisualBertForVisualReasoning(snake_case__ )
elif model_type == "multichoice":
__UpperCAmelCase : int = VisualBertForMultipleChoice(snake_case__ )
model.load_state_dict(snake_case__ )
# Save Checkpoints
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
_snake_case = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
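# A tiny self-contained demo of the prefix-renaming step performed by
# get_new_dict above (toy keys; the rename pairs mirror the list at the top of
# this sample):
from collections import OrderedDict

pairs = [("bert.bert", "visual_bert"), ("bert.cls", "cls")]
old_sd = OrderedDict([("bert.bert.encoder.weight", 1), ("bert.cls.bias", 2)])
new_sd = OrderedDict()
for key, value in old_sd.items():
    new_key = key
    for old, new in pairs:
        new_key = new_key.replace(old, new)
    new_sd[new_key] = value
print(list(new_sd))  # ['visual_bert.encoder.weight', 'cls.bias']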
| 382 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase ( unittest.TestCase):
"""simple docstring"""
    def setUp( self ):
        self.checkpoint = '''ylacombe/bark-small'''
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = '''en_speaker_1'''
        self.input_string = '''This is a test string'''
        self.speaker_embeddings_dict_path = '''speaker_embeddings_path.json'''
        self.speaker_embeddings_directory = '''speaker_embeddings'''
    def get_tokenizer( self , **kwargs ):
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self : Optional[int] ) -> Optional[int]:
_UpperCamelCase =self.get_tokenizer()
_UpperCamelCase =BarkProcessor(tokenizer=UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase =BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
_UpperCamelCase =BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_UpperCamelCase =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_UpperCamelCase =BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCamelCase__ ( self : Union[str, Any] ) -> str:
_UpperCamelCase =BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_UpperCamelCase =35
_UpperCamelCase =2
_UpperCamelCase =8
_UpperCamelCase ={
'''semantic_prompt''': np.ones(UpperCamelCase__ ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_UpperCamelCase =processor(text=self.input_string , voice_preset=UpperCamelCase__ )
_UpperCamelCase =inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCamelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_UpperCamelCase =os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(UpperCamelCase__ , **UpperCamelCase__ )
_UpperCamelCase =processor(text=self.input_string , voice_preset=UpperCamelCase__ )
_UpperCamelCase =inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCamelCase__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_UpperCamelCase =processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCamelCase__ ( self : Dict ) -> str:
_UpperCamelCase =self.get_tokenizer()
_UpperCamelCase =BarkProcessor(tokenizer=UpperCamelCase__ )
_UpperCamelCase =processor(text=self.input_string )
_UpperCamelCase =tokenizer(
self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
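# Hedged usage sketch for BarkProcessor with the checkpoint used in the tests
# above (downloads tokenizer files and speaker embeddings on first use, so
# network access is assumed):
from transformers import BarkProcessor

processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
print(inputs["input_ids"].shape)  # (1, 256): padded to max_length as in the test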
| 713 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
__lowerCamelCase : str = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
__lowerCamelCase : Tuple = {
'allenai/led-base-16384': 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode ():
    """simple docstring"""
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs (word ):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class UpperCAmelCase ( lowercase_):
"""simple docstring"""
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ["""input_ids""", """attention_mask"""]
def __init__( self : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any="replace" , UpperCamelCase__ : List[Any]="<s>" , UpperCamelCase__ : Any="</s>" , UpperCamelCase__ : Tuple="</s>" , UpperCamelCase__ : List[Any]="<s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : int="<mask>" , UpperCamelCase__ : int=False , **UpperCamelCase__ : int , ) -> Tuple:
_UpperCamelCase =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
_UpperCamelCase =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
_UpperCamelCase =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
_UpperCamelCase =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
_UpperCamelCase =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token
_UpperCamelCase =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , **UpperCamelCase__ , )
with open(UpperCamelCase__ , encoding='''utf-8''' ) as vocab_handle:
_UpperCamelCase =json.load(UpperCamelCase__ )
_UpperCamelCase ={v: k for k, v in self.encoder.items()}
_UpperCamelCase =errors # how to handle errors in decoding
_UpperCamelCase =bytes_to_unicode()
_UpperCamelCase ={v: k for k, v in self.byte_encoder.items()}
with open(UpperCamelCase__ , encoding='''utf-8''' ) as merges_handle:
_UpperCamelCase =merges_handle.read().split('''\n''' )[1:-1]
_UpperCamelCase =[tuple(merge.split() ) for merge in bpe_merges]
_UpperCamelCase =dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
_UpperCamelCase ={}
_UpperCamelCase =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_UpperCamelCase =re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def UpperCamelCase__ ( self : Tuple ) -> List[str]:
return len(self.encoder )
def UpperCamelCase__ ( self : List[Any] ) -> Optional[int]:
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase__ ( self : str , UpperCamelCase__ : List[Any] ) -> List[Any]:
if token in self.cache:
return self.cache[token]
_UpperCamelCase =tuple(UpperCamelCase__ )
_UpperCamelCase =get_pairs(UpperCamelCase__ )
if not pairs:
return token
while True:
_UpperCamelCase =min(UpperCamelCase__ , key=lambda UpperCamelCase__ : self.bpe_ranks.get(UpperCamelCase__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_UpperCamelCase , _UpperCamelCase =bigram
_UpperCamelCase =[]
_UpperCamelCase =0
while i < len(UpperCamelCase__ ):
try:
_UpperCamelCase =word.index(UpperCamelCase__ , UpperCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_UpperCamelCase =j
if word[i] == first and i < len(UpperCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_UpperCamelCase =tuple(UpperCamelCase__ )
_UpperCamelCase =new_word
if len(UpperCamelCase__ ) == 1:
break
else:
_UpperCamelCase =get_pairs(UpperCamelCase__ )
_UpperCamelCase =''' '''.join(UpperCamelCase__ )
_UpperCamelCase =word
return word
def UpperCamelCase__ ( self : List[str] , UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]:
_UpperCamelCase =[]
for token in re.findall(self.pat , UpperCamelCase__ ):
_UpperCamelCase =''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase__ ).split(''' ''' ) )
return bpe_tokens
def UpperCamelCase__ ( self : Any , UpperCamelCase__ : str ) -> int:
return self.encoder.get(UpperCamelCase__ , self.encoder.get(self.unk_token ) )
def UpperCamelCase__ ( self : Any , UpperCamelCase__ : List[Any] ) -> int:
return self.decoder.get(UpperCamelCase__ )
def UpperCamelCase__ ( self : Any , UpperCamelCase__ : Any ) -> List[Any]:
_UpperCamelCase =''''''.join(UpperCamelCase__ )
_UpperCamelCase =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def UpperCamelCase__ ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCamelCase =os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCamelCase =os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) + '''\n''' )
_UpperCamelCase =0
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
_UpperCamelCase =token_index
writer.write(''' '''.join(UpperCamelCase__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def UpperCamelCase__ ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCamelCase =[self.cls_token_id]
_UpperCamelCase =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]
def UpperCamelCase__ ( self : List[str] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]:
_UpperCamelCase =[self.sep_token_id]
_UpperCamelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self : str , UpperCamelCase__ : str , UpperCamelCase__ : List[str]=False , **UpperCamelCase__ : str ) -> Dict:
_UpperCamelCase =kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase__ ) > 0 and not text[0].isspace()):
_UpperCamelCase =''' ''' + text
return (text, kwargs)
def UpperCamelCase__ ( self : Optional[Any] , UpperCamelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , ) -> dict:
_UpperCamelCase =super()._pad(
encoded_inputs=UpperCamelCase__ , max_length=UpperCamelCase__ , padding_strategy=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , )
# Load from model defaults
if return_attention_mask is None:
_UpperCamelCase ='''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_UpperCamelCase =encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_UpperCamelCase =len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCamelCase__ )
if needs_to_be_padded:
_UpperCamelCase =len(UpperCamelCase__ ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_UpperCamelCase =(
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
_UpperCamelCase =[-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
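# Quick demo of the get_pairs helper above: it collects the adjacent symbol
# pairs that BPE ranks when choosing the next merge.
print(sorted(get_pairs(("h", "e", "l", "l", "o"))))
# [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o')]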
| 271 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""microsoft/biogpt""": """https://huggingface.co/microsoft/biogpt/resolve/main/config.json""",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class _a ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''biogpt'''
    def __init__( self , vocab_size=42_384 , hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4_096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_024 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
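# Hedged usage sketch for the config above (it ships in transformers as
# BioGptConfig; building a model from a config uses random weights, so no
# checkpoint download happens):
from transformers import BioGptConfig, BioGptForCausalLM

config = BioGptConfig(num_hidden_layers=2, hidden_size=256, num_attention_heads=4)
model = BioGptForCausalLM(config)
print(model.config.vocab_size)  # 42384, the default above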
| 451 |
'''simple docstring'''
def is_power_of_two( number: int ) -> bool:
if number < 0:
raise ValueError("""number must not be negative""" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
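# Demonstration of the bit trick above: n & (n - 1) clears the lowest set bit,
# so the expression is zero exactly for powers of two. Note the function also
# returns True for 0, which strictly speaking is not a power of two.
for n in [0, 1, 2, 3, 4, 12, 16]:
    print(n, is_power_of_two(n))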
| 451 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ : str =logging.get_logger(__name__)
snake_case_ : int ={
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class a__ ( PretrainedConfig ):
    model_type = 'mobilenet_v2'
    def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.8 , initializer_range=0.02 , layer_norm_eps=0.001 , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero." )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class a__ ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )
@property
def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def _lowerCamelCase ( self ) -> float:
return 1e-4
| 707 |
from collections.abc import Generator
def fibonacci_generator( ):
    '''simple docstring'''
    a , b = 0, 1
    while True:
        a , b = b, a + b
        yield b
def solution( n: int = 1000 ):
    '''simple docstring'''
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen ) ) ) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
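# Quick check of the two functions above: the generator yields
# 1, 2, 3, 5, 8, 13, ... (the leading duplicate 1 is skipped by the update
# order), and solution(n) returns the index of the first term with n digits.
gen = fibonacci_generator()
print([next(gen) for _ in range(7)])  # [1, 2, 3, 5, 8, 13, 21]
print(solution(3))  # 12: F(12) = 144 is the first three-digit term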
| 205 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self : int ):
"""simple docstring"""
A : int = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''' )
A : Optional[int] = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
A : Dict = model(lowerCAmelCase_ )['''last_hidden_state''']
A : Union[str, Any] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , lowerCAmelCase_ )
# compare the actual values for a slice.
A : Optional[int] = tf.convert_to_tensor(
[[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 256 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_mobilevit'] = ['MobileViTFeatureExtractor']
    _import_structure['image_processing_mobilevit'] = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilevit'] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mobilevit'] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 121 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys( name ):
    if "emb" in name:
        name = name.replace('emb' , 'model.decoder.embed_tokens' )
    if "transformer" in name:
        name = name.replace('transformer' , 'model.decoder' )
    if "cross_attention" in name:
        name = name.replace('cross_attention' , 'encoder_attn' )
    if "linear1" in name:
        name = name.replace('linear1' , 'fc1' )
    if "linear2" in name:
        name = name.replace('linear2' , 'fc2' )
    if "norm1" in name:
        name = name.replace('norm1' , 'self_attn_layer_norm' )
    if "norm_cross" in name:
        name = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
    if "norm2" in name:
        name = name.replace('norm2' , 'final_layer_norm' )
    if "out_norm" in name:
        name = name.replace('out_norm' , 'model.decoder.layer_norm' )
    if "linears" in name:
        name = name.replace('linears' , 'lm_heads' )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
    return name
def rename_state_dict( state_dict , hidden_size ):
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight" , "q_proj.weight" )] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight" , "k_proj.weight" )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight" , "v_proj.weight" )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj." ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint( checkpoint ):
    if checkpoint == "small":
        # default config values
        hidden_size = 10_24
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 15_36
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 20_48
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint( checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict , enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = TaEncoderModel.from_pretrained('t5-base' )
    audio_encoder = EncodecModel.from_pretrained('facebook/encodec_32khz' )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys , unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
    if len(unexpected_keys ) > 0:
        raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 20_48):
        raise ValueError('Incorrect shape for logits' )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained('t5-base' )
    feature_extractor = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 20_48
    model.generation_config.pad_token_id = 20_48
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
lowercase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
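# A toy demonstration of the fused-QKV split performed in rename_state_dict
# above, with hidden_size=4 for readability (requires torch):
import torch

hidden_size = 4
fused = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
q = fused[:hidden_size, :]
k = fused[hidden_size : 2 * hidden_size, :]
v = fused[-hidden_size:, :]
print(q.shape, k.shape, v.shape)  # three (4, 4) projection matrices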
| 713 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = """pix2struct_text_model"""
UpperCAmelCase_ : Union[str, Any] = ["""past_key_values"""]
UpperCAmelCase_ : Optional[int] = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : int , lowercase_ : str=50_244 , lowercase_ : Tuple=768 , lowercase_ : List[Any]=64 , lowercase_ : List[Any]=2_048 , lowercase_ : Optional[Any]=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : Union[str, Any]=32 , lowercase_ : List[str]=128 , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=1E-6 , lowercase_ : Union[str, Any]=1.0 , lowercase_ : Dict="gelu_new" , lowercase_ : Any=0 , lowercase_ : Any=False , lowercase_ : List[Any]=0 , lowercase_ : Tuple=1 , lowercase_ : List[str]=False , lowercase_ : List[Any]=True , **lowercase_ : Union[str, Any] , ) -> Dict:
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : int = hidden_size
UpperCAmelCase : List[Any] = d_kv
UpperCAmelCase : Any = d_ff
UpperCAmelCase : List[str] = num_layers
UpperCAmelCase : str = num_heads
UpperCAmelCase : List[Any] = relative_attention_num_buckets
UpperCAmelCase : Tuple = relative_attention_max_distance
UpperCAmelCase : str = dropout_rate
UpperCAmelCase : Optional[int] = layer_norm_epsilon
UpperCAmelCase : int = initializer_factor
UpperCAmelCase : Union[str, Any] = use_cache
UpperCAmelCase : List[Any] = eos_token_id
UpperCAmelCase : Union[str, Any] = decoder_start_token_id
# for backwards compatibility
UpperCAmelCase : List[str] = dense_act_fn
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , tie_word_embeddings=lowercase_ , is_decoder=lowercase_ , **lowercase_ , )
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : List[str] ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : str = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : Any = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : int = """pix2struct_vision_model"""
def __init__( self : str , lowercase_ : Any=768 , lowercase_ : Union[str, Any]=768 , lowercase_ : Union[str, Any]=2_048 , lowercase_ : Tuple=64 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=12 , lowercase_ : int="gelu_new" , lowercase_ : List[Any]=1E-6 , lowercase_ : Optional[int]=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : str=1E-10 , lowercase_ : Dict=1.0 , lowercase_ : int=4_096 , lowercase_ : Tuple=32 , lowercase_ : Any=128 , **lowercase_ : Any , ) -> Tuple:
super().__init__(**lowercase_ )
UpperCAmelCase : Any = hidden_size
UpperCAmelCase : Any = patch_embed_hidden_size
UpperCAmelCase : Optional[int] = d_ff
UpperCAmelCase : Dict = dropout_rate
UpperCAmelCase : Dict = num_hidden_layers
UpperCAmelCase : List[Any] = num_attention_heads
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : str = initializer_factor
UpperCAmelCase : str = attention_dropout
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : Union[str, Any] = dense_act_fn
UpperCAmelCase : Dict = seq_len
UpperCAmelCase : Optional[int] = relative_attention_num_buckets
UpperCAmelCase : Union[str, Any] = relative_attention_max_distance
UpperCAmelCase : str = d_kv
@classmethod
def UpperCAmelCase_ ( cls : Optional[Any] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Any ) -> "PretrainedConfig":
cls._set_token_in_kwargs(lowercase_ )
UpperCAmelCase , UpperCAmelCase : Tuple = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('model_type' ) == "pix2struct":
UpperCAmelCase : List[str] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowercase_ , **lowercase_ )
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """pix2struct"""
UpperCAmelCase_ : Dict = True
def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : int=None , lowercase_ : Optional[Any]=1.0 , lowercase_ : List[str]=0.02 , lowercase_ : str=False , lowercase_ : Union[str, Any]=False , lowercase_ : Tuple=True , **lowercase_ : Optional[Any] , ) -> str:
super().__init__(tie_word_embeddings=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ )
if text_config is None:
UpperCAmelCase : Optional[int] = {}
logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' )
if vision_config is None:
UpperCAmelCase : List[str] = {}
logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' )
UpperCAmelCase : Optional[Any] = PixaStructTextConfig(**lowercase_ )
UpperCAmelCase : Union[str, Any] = PixaStructVisionConfig(**lowercase_ )
UpperCAmelCase : Optional[Any] = self.text_config.decoder_start_token_id
UpperCAmelCase : str = self.text_config.pad_token_id
UpperCAmelCase : Optional[int] = self.text_config.eos_token_id
UpperCAmelCase : Union[str, Any] = initializer_factor
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : int = self.initializer_range
UpperCAmelCase : str = is_vqa
@classmethod
def UpperCAmelCase_ ( cls : Tuple , lowercase_ : PixaStructTextConfig , lowercase_ : PixaStructVisionConfig , **lowercase_ : str ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
UpperCAmelCase : List[Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase : Optional[int] = self.text_config.to_dict()
UpperCAmelCase : Dict = self.vision_config.to_dict()
UpperCAmelCase : Optional[Any] = self.__class__.model_type
return output
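# Hedged composition sketch for the three configs above (assumption: the
# obfuscated PixaStruct* names correspond to transformers' Pix2StructTextConfig,
# Pix2StructVisionConfig and Pix2StructConfig):
from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_cfg = Pix2StructTextConfig(num_layers=2)
vision_cfg = Pix2StructVisionConfig(num_hidden_layers=2)
cfg = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
print(cfg.text_config.num_layers)  # 2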
| 695 | 0 |
'''simple docstring'''
def solution( limit : int = 1_00_00_00 ) -> int:
    '''simple docstring'''
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d ,also 4d<a
    count = sum(1 for x in frequency[1:limit] if x == 10 )
    return count
if __name__ == "__main__":
print(f"{solution() = }")
| 78 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode :
    data : int
    left : TreeNode | None = None
    right : TreeNode | None = None
SCREAMING_SNAKE_CASE_: Union[str, Any] =namedtuple('CoinsDistribResult', 'moves excess')
def distribute_coins( root : TreeNode | None ) -> int:
    '''simple docstring'''
    if root is None:
        return 0
    # Validation
    def count_nodes(node : TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1
    def count_coins(node : TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data
    if count_nodes(root ) != count_coins(root ):
        raise ValueError("The nodes number should be same as the number of coins" )
    # Main calculation
    def get_distrib(node : TreeNode | None ) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves , distrib_excess )
    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
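# Worked example for distribute_coins above: three nodes and three coins, all
# on the root; the root passes one coin to each child, so two moves suffice.
example_root = TreeNode(3, TreeNode(0), TreeNode(0))
print(distribute_coins(example_root))  # 2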
| 78 | 1 |
'''simple docstring'''
def solution( n : int = 100 ) -> int:
    """simple docstring"""
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2 , n ):
        for b in range(2 , n ):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow )  # adds the result to the set
    return len(collect_powers )
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
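# Quick check against the Project Euler 29 example: for 2 <= a, b <= 5 there
# are exactly 15 distinct terms a**b.
print(solution(5))  # 15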
| 711 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_a : Optional[Any] = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_a : str = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_a : List[Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def lowerCAmelCase( self : Any ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1_024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 84 | 0 |
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None


def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
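

# Hedged usage sketch (added, not part of the original module): `Dataset.from_spark`
# is the public entry point in `datasets` that routes through this builder; the
# toy DataFrame below is an assumed example.
#
#   import pyspark
#   from datasets import Dataset
#
#   spark = pyspark.sql.SparkSession.builder.getOrCreate()
#   df = spark.createDataFrame([("a", 0), ("b", 1)], ["text", "label"])
#   ds = Dataset.from_spark(df)  # materializes the DataFrame via Spark(df)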
| 264 |
"""simple docstring"""
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    # Numerators of the convergents of the continued fraction for e,
    # whose partial quotients are [2; 1, 2, 1, 1, 4, 1, 1, 6, ...].
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
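

# Hedged sanity check (added, not in the original solution): the tenth
# convergent of e is 1457/536, and 1 + 4 + 5 + 7 == 17, which is the worked
# example given in Project Euler problem 65.
def _check_solution_example() -> None:
    assert sum_digits(1457) == 17
    assert solution(10) == 17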
if __name__ == "__main__":
print(F'{solution() = }')
| 264 | 1 |
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 9 |
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 100_0000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0"
        )

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter
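

# Hedged sanity check (added): 169 -> 363601 -> 1454 -> 169 is the well-known
# three-element loop from the problem statement, so the factorial digit sum of
# 169 must be 1! + 6! + 9! = 363601.
def _check_digit_factorial_example() -> None:
    assert digit_factorial_sum(169) == 363_601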
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution()}')
| 9 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
UpperCAmelCase__ = "UperNetConfig"
class UperNetConvModule(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
class UperNetFCNHead(nn.Module):
    def __init__(
        self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1
    ) -> None:
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = R"\n    Parameters:\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n        Initializing with a config file does not load the weights associated with the model, only the\n        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UPERNET_INPUTS_DOCSTRING = R"\n    Args:\n    pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n        Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n        [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n    output_attentions (`bool`, *optional*):\n        Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n        `attentions` under returned tensors for more detail.\n    output_hidden_states (`bool`, *optional*):\n        Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n        returned tensors for more detail.\n    return_dict (`bool`, *optional*):\n        Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"


@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
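

# Hedged usage sketch (added; the checkpoint name comes from the archive list
# above, while `image` stands for any PIL image you supply):
#
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits  # shape: (batch, num_labels, height, width)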
| 117 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)

    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    # Compute the entropy of a probability distribution along the last axis.
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    # Log a 2D tensor, one row per layer.
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    # Try pruning and measure the time speedup: pruning is like masking, but we
    # actually remove the masked weights from the network.
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=a , type=a , required=a , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=a , type=a , required=a , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=a , type=a , required=a , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=a , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=a , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=a , type=a , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=a , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
'''--masking_threshold''' , default=0.9 , type=a , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
'''--masking_amount''' , default=0.1 , type=a , help='''Amount to heads to masking at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=a , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=a , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=a , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=a , default=42 )
parser.add_argument('''--local_rank''' , type=a , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=a , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=a , default='''''' , help='''Can be used for distant debugging.''' )
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
# Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))

    logger.info("Training/evaluation parameters %s", args)
# Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
# Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
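
# Hedged invocation sketch (added; the script filename is hypothetical and the
# flags mirror the argparse definitions above -- `ids.txt` stands for a
# whitespace-separated dump of token ids loadable by np.loadtxt):
#
#   python run_prune_gpt.py --model_name_or_path gpt2 \
#       --data_dir ids.txt --output_dir out --try_masking --masking_threshold 0.9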
| 117 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
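
# Hedged note (added): with the `_LazyModule` indirection above, importing the
# package only records the import structure; a submodule such as
# `modeling_xglm` is loaded on first attribute access, e.g.
#
#   from transformers import XGLMConfig  # pulls in configuration_xglm only
#
# so the optional backends (torch, tf, flax, sentencepiece, tokenizers) never
# have to be installed just to import the package.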
| 703 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    # Chudnovsky algorithm: each term of the series contributes roughly 14
    # digits, so ceil(precision / 14) iterations suffice.
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 42_6880 * Decimal(1_0005).sqrt()
    exponential_term = 1
    linear_term = 1359_1409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 5_4514_0134
        exponential_term *= -26_2537_4126_4076_8000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
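

# Hedged check (added): the reference doctest for this implementation gives
# pi(10) == "3.14159265" (ten significant digits, last one dropped).
def _check_pi_example() -> None:
    assert pi(10) == "3.14159265"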
if __name__ == "__main__":
    n = 5_0
print(F"""The first {n} digits of pi is: {pi(n)}""")
| 592 | 0 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 6_5) for i in range(2_6)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 2_6):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
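

# Hedged round-trip check (added; values taken from the reference doctests for
# this keyword cipher): key "Goodbye!!" deduplicates to "GODBYE", so
# "Hello World!!" enciphers to "CYJJM VMQJB!!" and deciphers back.
def _check_cipher_roundtrip() -> None:
    cipher_map = create_cipher_map("Goodbye!!")
    assert encipher("Hello World!!", cipher_map) == "CYJJM VMQJB!!"
    assert decipher("CYJJM VMQJB!!", cipher_map) == "HELLO WORLD!!"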
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 475 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
a = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
a = get_tests_dir("fixtures/vocab.json")
a = get_tests_dir("fixtures")
class _A ( unittest.TestCase ):
__a = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
def UpperCAmelCase ( self ):
_UpperCAmelCase = 0
def UpperCAmelCase ( self ):
_UpperCAmelCase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = WavaVecaConfig()
_UpperCAmelCase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = AutoProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
copyfile(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" ) )
_UpperCAmelCase = AutoProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = WavaVecaFeatureExtractor()
_UpperCAmelCase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
_UpperCAmelCase = WavaVecaProcessor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# save in new folder
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
# drop `processor_class` in tokenizer
with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , """r""" ) as f:
_UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE )
config_dict.pop("""processor_class""" )
with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , """w""" ) as f:
f.write(json.dumps(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = AutoProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = WavaVecaFeatureExtractor()
_UpperCAmelCase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
_UpperCAmelCase = WavaVecaProcessor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# save in new folder
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
# drop `processor_class` in feature extractor
with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , """r""" ) as f:
_UpperCAmelCase = json.load(_SCREAMING_SNAKE_CASE )
config_dict.pop("""processor_class""" )
with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , """w""" ) as f:
f.write(json.dumps(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = AutoProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(_SCREAMING_SNAKE_CASE )
# copy relevant files
copyfile(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" ) )
# create emtpy sample processor
with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , """w""" ) as f:
f.write("""{}""" )
_UpperCAmelCase = AutoProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
_UpperCAmelCase = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
_UpperCAmelCase = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
_UpperCAmelCase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_SCREAMING_SNAKE_CASE , use_fast=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def UpperCAmelCase ( self ):
try:
AutoConfig.register("""custom""" , _SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE , slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
AutoProcessor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoProcessor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
_UpperCAmelCase = CustomFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.txt""" )
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
_UpperCAmelCase = CustomTokenizer(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = CustomProcessor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = AutoProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase ( self ):
class _A ( __lowercase ):
__a = False
class _A ( __lowercase ):
__a = False
class _A ( __lowercase ):
__a = """AutoFeatureExtractor"""
__a = """AutoTokenizer"""
__a = False
try:
AutoConfig.register("""custom""" , _SCREAMING_SNAKE_CASE )
AutoFeatureExtractor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE , slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
AutoProcessor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local classes.
_UpperCAmelCase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
_UpperCAmelCase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
_UpperCAmelCase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase ( self ):
_UpperCAmelCase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def UpperCAmelCase ( self ):
_UpperCAmelCase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class _A ( unittest.TestCase ):
__a = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def UpperCAmelCase ( cls ):
_UpperCAmelCase = TOKEN
HfFolder.save_token(_SCREAMING_SNAKE_CASE )
@classmethod
def UpperCAmelCase ( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def UpperCAmelCase ( self ):
_UpperCAmelCase = WavaVecaProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_SCREAMING_SNAKE_CASE , """test-processor""" ) , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token )
_UpperCAmelCase = WavaVecaProcessor.from_pretrained(F"{USER}/test-processor" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , _SCREAMING_SNAKE_CASE ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def UpperCAmelCase ( self ):
        processor = WavaVecaProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_SCREAMING_SNAKE_CASE , """test-processor-org""" ) , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization="""valid_org""" , )
_UpperCAmelCase = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , _SCREAMING_SNAKE_CASE ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def UpperCAmelCase ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir , """vocab.txt""" )
            with open(vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
                vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
            tokenizer = CustomTokenizer(vocab_file )
        processor = CustomProcessor(feature_extractor , tokenizer )
        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(F"{USER}/test-dynamic-processor" , token=self._token )
            repo = Repository(tmp_dir , clone_from=F"{USER}/test-dynamic-processor" , token=self._token )
            processor.save_pretrained(tmp_dir )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir , """tokenizer_config.json""" ) ) as f:
                tokenizer_config = json.load(f )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(_SCREAMING_SNAKE_CASE , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(_SCREAMING_SNAKE_CASE , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(_SCREAMING_SNAKE_CASE , """custom_processing.py""" ) ) )
repo.push_to_hub()
        new_processor = AutoProcessor.from_pretrained(F"{USER}/test-dynamic-processor" , trust_remote_code=True )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 518 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
lowercase_ : Any = logging.get_logger(__name__)
class __UpperCamelCase (PoolFormerImageProcessor ):
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> None:
'''simple docstring'''
        warnings.warn(
            """The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use PoolFormerImageProcessor instead.""" , FutureWarning , )
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
| 653 |
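# --- Editor's addendum (not part of the original record above) ---
# The PoolFormer file above is the standard transformers deprecation shim:
# keep the old class importable, emit a FutureWarning on construction, and
# forward everything to the replacement. A minimal, self-contained sketch of
# the same pattern with hypothetical class names:
import warnings


class NewImageProcessor:
    def __init__(self, *args, **kwargs):
        self.args, self.kwargs = args, kwargs


class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class OldFeatureExtractor is deprecated. Use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)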
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
lowercase_ : Tuple = logging.getLogger(__name__)
def git_log(folder_path: str ):
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        """repo_id""": str(repo ),
        """repo_sha""": str(repo.head.object.hexsha ),
        """repo_branch""": str(repo.active_branch ),
    }
    with open(os.path.join(folder_path , """git_log.json""" ) , """w""" ) as f:
        json.dump(repo_infos , f , indent=4 )
def init_gpu_params(params ):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
assert torch.cuda.is_available()
logger.info("""Initializing GPUs""" )
if params.n_gpu > 1:
assert params.local_rank != -1
        params.world_size = int(os.environ["""WORLD_SIZE"""] )
        params.n_gpu_per_node = int(os.environ["""N_GPU_NODE"""] )
        params.global_rank = int(os.environ["""RANK"""] )
# number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
assert params.n_nodes == int(os.environ["""N_NODES"""] )
assert params.node_id == int(os.environ["""NODE_RANK"""] )
# local job (single GPU)
else:
assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
# summary
lowercase = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def set_seed(args ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 653 | 1 |
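# --- Editor's addendum ---
# init_gpu_params above reads the process topology from launcher-set
# environment variables and asserts CUDA is available in the multi-GPU path.
# A hedged sketch of a compatible single-node, two-GPU environment (the
# variable names come from the os.environ reads above; the SimpleNamespace
# stand-in for `params` is an illustrative assumption):
import os
from types import SimpleNamespace

os.environ.update(
    {"WORLD_SIZE": "2", "N_GPU_NODE": "2", "RANK": "0", "N_NODES": "1", "NODE_RANK": "0"}
)
params = SimpleNamespace(n_gpu=2, local_rank=0)
# init_gpu_params(params) would then derive:
#   params.world_size == 2, params.n_nodes == 1, params.node_id == 0,
#   params.is_master is True (node 0, local rank 0), params.multi_node is False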
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
UpperCAmelCase = NewType("""DataClass""", Any)
UpperCAmelCase = NewType("""DataClassType""", Any)
def string_to_bool(v ):
    """simple docstring"""
    if isinstance(v , bool ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' )
def make_choice_type_function(choices: list ):
    """simple docstring"""
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg , arg )
def HfArg( *,
    aliases: Union[str, List[str]] = None , help: str = None , default: Any = dataclasses.MISSING , default_factory: Callable[[], Any] = dataclasses.MISSING , metadata: dict = None , **kwargs , ):
    """simple docstring"""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["""aliases"""] = aliases
    if help is not None:
        metadata["""help"""] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
class lowercase__ ( ArgumentParser ):
__UpperCAmelCase = 42
def __init__( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> Optional[Any]:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
_lowerCamelCase : Dict = ArgumentDefaultsHelpFormatter
super().__init__(**SCREAMING_SNAKE_CASE)
if dataclasses.is_dataclass(SCREAMING_SNAKE_CASE):
_lowerCamelCase : Union[str, Any] = [dataclass_types]
_lowerCamelCase : Tuple = list(SCREAMING_SNAKE_CASE)
for dtype in self.dataclass_types:
self._add_dataclass_arguments(SCREAMING_SNAKE_CASE)
@staticmethod
def UpperCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> int:
_lowerCamelCase : Dict = F'--{field.name}'
_lowerCamelCase : int = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , SCREAMING_SNAKE_CASE):
raise RuntimeError(
"""Unresolved type detected, which should have been done with the help of """
"""`typing.get_type_hints` method by default""")
_lowerCamelCase : Any = kwargs.pop("""aliases""" , [])
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
_lowerCamelCase : Optional[Any] = [aliases]
_lowerCamelCase : Optional[int] = getattr(field.type , """__origin__""" , field.type)
if origin_type is Union or (hasattr(SCREAMING_SNAKE_CASE , """UnionType""") and isinstance(SCREAMING_SNAKE_CASE , types.UnionType)):
if str not in field.type.__args__ and (
len(field.type.__args__) != 2 or type(SCREAMING_SNAKE_CASE) not in field.type.__args__
):
raise ValueError(
"""Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"""
""" the argument parser only supports one type per argument."""
F' Problem encountered in field \'{field.name}\'.')
if type(SCREAMING_SNAKE_CASE) not in field.type.__args__:
# filter `str` in Union
_lowerCamelCase : Optional[Any] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
_lowerCamelCase : Union[str, Any] = getattr(field.type , """__origin__""" , field.type)
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
_lowerCamelCase : List[str] = (
field.type.__args__[0] if isinstance(SCREAMING_SNAKE_CASE , field.type.__args__[1]) else field.type.__args__[1]
)
_lowerCamelCase : Tuple = getattr(field.type , """__origin__""" , field.type)
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
_lowerCamelCase : Dict = {}
if origin_type is Literal or (isinstance(field.type , SCREAMING_SNAKE_CASE) and issubclass(field.type , SCREAMING_SNAKE_CASE)):
if origin_type is Literal:
_lowerCamelCase : Union[str, Any] = field.type.__args__
else:
_lowerCamelCase : Optional[int] = [x.value for x in field.type]
_lowerCamelCase : int = make_choice_type_function(kwargs["""choices"""])
if field.default is not dataclasses.MISSING:
_lowerCamelCase : Optional[int] = field.default
else:
_lowerCamelCase : Optional[int] = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
_lowerCamelCase : Dict = copy(SCREAMING_SNAKE_CASE)
# Hack because type=bool in argparse does not behave as we want.
_lowerCamelCase : List[str] = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
_lowerCamelCase : List[str] = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
_lowerCamelCase : List[Any] = default
# This tells argparse we accept 0 or 1 value after --field_name
_lowerCamelCase : str = """?"""
# This is the value that will get picked if we do --field_name (without value)
_lowerCamelCase : Dict = True
elif isclass(SCREAMING_SNAKE_CASE) and issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE):
_lowerCamelCase : Any = field.type.__args__[0]
_lowerCamelCase : Optional[int] = """+"""
if field.default_factory is not dataclasses.MISSING:
_lowerCamelCase : int = field.default_factory()
elif field.default is dataclasses.MISSING:
_lowerCamelCase : Dict = True
else:
_lowerCamelCase : List[Any] = field.type
if field.default is not dataclasses.MISSING:
_lowerCamelCase : Any = field.default
elif field.default_factory is not dataclasses.MISSING:
_lowerCamelCase : List[Any] = field.default_factory()
else:
_lowerCamelCase : int = True
parser.add_argument(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
_lowerCamelCase : Tuple = False
parser.add_argument(F'--no_{field.name}' , action="""store_false""" , dest=field.name , **SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Any:
if hasattr(SCREAMING_SNAKE_CASE , """_argument_group_name"""):
_lowerCamelCase : List[Any] = self.add_argument_group(dtype._argument_group_name)
else:
_lowerCamelCase : str = self
try:
_lowerCamelCase : Dict[str, type] = get_type_hints(SCREAMING_SNAKE_CASE)
except NameError:
raise RuntimeError(
F'Type resolution failed for {dtype}. Try declaring the class in global scope or '
"""removing line of `from __future__ import annotations` which opts in Postponed """
"""Evaluation of Annotations (PEP 563)""")
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(SCREAMING_SNAKE_CASE):
_lowerCamelCase : Optional[int] = """.""".join(map(SCREAMING_SNAKE_CASE , sys.version_info[:3]))
raise RuntimeError(
F'Type resolution failed for {dtype} on Python {python_version}. Try removing '
"""line of `from __future__ import annotations` which opts in union types as """
"""`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To """
"""support Python versions that lower than 3.10, you need to use """
"""`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """
"""`X | None`.""") from ex
raise
for field in dataclasses.fields(SCREAMING_SNAKE_CASE):
if not field.init:
continue
_lowerCamelCase : Union[str, Any] = type_hints[field.name]
self._parse_dataclass_field(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , ) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
_lowerCamelCase : str = []
if args_filename:
args_files.append(Path(SCREAMING_SNAKE_CASE))
elif look_for_args_file and len(sys.argv):
args_files.append(Path(sys.argv[0]).with_suffix(""".args"""))
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
_lowerCamelCase : Optional[Any] = ArgumentParser()
args_file_parser.add_argument(SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , action="""append""")
# Use only remaining args for further parsing (remove the args_file_flag)
_lowerCamelCase , _lowerCamelCase : int = args_file_parser.parse_known_args(args=SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = vars(SCREAMING_SNAKE_CASE).get(args_file_flag.lstrip("""-""") , SCREAMING_SNAKE_CASE)
if cmd_args_file_paths:
args_files.extend([Path(SCREAMING_SNAKE_CASE) for p in cmd_args_file_paths])
_lowerCamelCase : List[str] = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
_lowerCamelCase : Optional[int] = file_args + args if args is not None else file_args + sys.argv[1:]
_lowerCamelCase , _lowerCamelCase : Tuple = self.parse_known_args(args=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = []
for dtype in self.dataclass_types:
_lowerCamelCase : str = {f.name for f in dataclasses.fields(SCREAMING_SNAKE_CASE) if f.init}
_lowerCamelCase : str = {k: v for k, v in vars(SCREAMING_SNAKE_CASE).items() if k in keys}
for k in keys:
delattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[str] = dtype(**SCREAMING_SNAKE_CASE)
outputs.append(SCREAMING_SNAKE_CASE)
if len(namespace.__dict__) > 0:
# additional namespace.
outputs.append(SCREAMING_SNAKE_CASE)
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}')
return (*outputs,)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False) -> Tuple[DataClass, ...]:
_lowerCamelCase : List[str] = set(args.keys())
_lowerCamelCase : List[Any] = []
for dtype in self.dataclass_types:
_lowerCamelCase : Dict = {f.name for f in dataclasses.fields(SCREAMING_SNAKE_CASE) if f.init}
_lowerCamelCase : Union[str, Any] = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys())
_lowerCamelCase : Any = dtype(**SCREAMING_SNAKE_CASE)
outputs.append(SCREAMING_SNAKE_CASE)
if not allow_extra_keys and unused_keys:
raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(SCREAMING_SNAKE_CASE)}')
return tuple(SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False) -> Tuple[DataClass, ...]:
with open(Path(SCREAMING_SNAKE_CASE) , encoding="""utf-8""") as open_json_file:
_lowerCamelCase : Optional[int] = json.loads(open_json_file.read())
_lowerCamelCase : Optional[Any] = self.parse_dict(SCREAMING_SNAKE_CASE , allow_extra_keys=SCREAMING_SNAKE_CASE)
return tuple(SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False) -> Tuple[DataClass, ...]:
_lowerCamelCase : Optional[int] = self.parse_dict(yaml.safe_load(Path(SCREAMING_SNAKE_CASE).read_text()) , allow_extra_keys=SCREAMING_SNAKE_CASE)
return tuple(SCREAMING_SNAKE_CASE)
| 88 |
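# --- Editor's addendum ---
# Typical usage of the argument parser defined above (upstream this class is
# transformers.HfArgumentParser and the first parsing method is
# parse_args_into_dataclasses; the dataclass below is a hypothetical example):
from dataclasses import dataclass, field


@dataclass
class ExampleArguments:
    learning_rate: float = field(default=3e-4, metadata={"help": "Peak learning rate."})
    epochs: int = 3
    fp16: bool = False  # bool fields accept a bare --fp16 flag via string_to_bool


parser = HfArgumentParser(ExampleArguments)
(example_args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--fp16"])
assert example_args.learning_rate == 1e-4 and example_args.fp16 is True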
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowercase : Tuple = logging.getLogger(__name__)
class lowerCamelCase__ ( TokenClassificationTask):
'''simple docstring'''
def __init__( self :Dict , a :Union[str, Any]=-1 ) -> List[str]:
# in NER datasets, the last column is usually reserved for NER label
__UpperCamelCase : str = label_idx
def _lowerCamelCase ( self :str , a :Tuple , a :Union[Split, str] ) -> List[InputExample]:
if isinstance(a , a ):
__UpperCamelCase : Dict = mode.value
__UpperCamelCase : Union[str, Any] = os.path.join(a , f'{mode}.txt' )
__UpperCamelCase : Any = 1
__UpperCamelCase : List[str] = []
with open(a , encoding="utf-8" ) as f:
__UpperCamelCase : Tuple = []
__UpperCamelCase : Dict = []
for line in f:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=a , labels=a ) )
guid_index += 1
__UpperCamelCase : Dict = []
__UpperCamelCase : List[str] = []
else:
__UpperCamelCase : Union[str, Any] = line.split(" " )
words.append(splits[0] )
if len(a ) > 1:
labels.append(splits[self.label_idx].replace("\n" , "" ) )
else:
# Examples could have no label for mode = "test"
labels.append("O" )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=a , labels=a ) )
return examples
def _lowerCamelCase ( self :int , a :TextIO , a :TextIO , a :List ) -> Optional[Any]:
__UpperCamelCase : Tuple = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
writer.write(a )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
__UpperCamelCase : Union[str, Any] = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
writer.write(a )
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )
def _lowerCamelCase ( self :Tuple , a :str ) -> List[str]:
if path:
with open(a , "r" ) as f:
__UpperCamelCase : List[str] = f.read().splitlines()
if "O" not in labels:
__UpperCamelCase : Any = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowerCamelCase__ ( __lowercase):
'''simple docstring'''
def __init__( self :List[str] ) -> int:
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def _lowerCamelCase ( self :List[str] , a :str ) -> List[str]:
if path:
with open(a , "r" ) as f:
__UpperCamelCase : Optional[Any] = f.read().splitlines()
if "O" not in labels:
__UpperCamelCase : int = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class lowerCamelCase__ ( TokenClassificationTask):
'''simple docstring'''
def _lowerCamelCase ( self :List[str] , a :Union[str, Any] , a :Union[Split, str] ) -> List[InputExample]:
if isinstance(a , a ):
__UpperCamelCase : Optional[Any] = mode.value
__UpperCamelCase : List[str] = os.path.join(a , f'{mode}.txt' )
__UpperCamelCase : Dict = 1
__UpperCamelCase : List[str] = []
with open(a , encoding="utf-8" ) as f:
for sentence in parse_incr(a ):
__UpperCamelCase : Optional[int] = []
__UpperCamelCase : int = []
for token in sentence:
words.append(token["form"] )
labels.append(token["upos"] )
assert len(a ) == len(a )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=a , labels=a ) )
guid_index += 1
return examples
def _lowerCamelCase ( self :List[Any] , a :TextIO , a :TextIO , a :List ) -> str:
__UpperCamelCase : List[Any] = 0
for sentence in parse_incr(a ):
__UpperCamelCase : Tuple = preds_list[example_id]
__UpperCamelCase : Dict = ""
for token in sentence:
out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(a )
example_id += 1
def _lowerCamelCase ( self :List[str] , a :str ) -> List[str]:
if path:
with open(a , "r" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
        ]
| 557 | 0 |
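# --- Editor's addendum ---
# The first task class above parses whitespace-separated CoNLL-style files:
# one token per line, a blank line between sentences, and the label taken
# from the column selected by label_idx (-1 for NER, -2 for chunking, while
# the last class reads CoNLL-U via parse_incr). A tiny illustrative
# train.txt (the sentences themselves are made up):
conll_sample = (
    "EU B-ORG\n"
    "rejects O\n"
    "German B-MISC\n"
    "call O\n"
    "\n"
    "Peter B-PER\n"
)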
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
A__ : int = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
A__ : Union[str, Any] = logging.get_logger(__name__)
class _lowercase ( PretrainedConfig ):
'''simple docstring'''
_A = 'maskformer'
_A = {'hidden_size': 'mask_feature_size'}
_A = ['resnet', 'swin']
_A = ['detr']
def __init__( self , __UpperCamelCase = 2_56 , __UpperCamelCase = 2_56 , __UpperCamelCase = 0.1 , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = 0.02 , __UpperCamelCase = 1.0 , __UpperCamelCase = 1.0 , __UpperCamelCase = 1.0 , __UpperCamelCase = 20.0 , __UpperCamelCase = None , **__UpperCamelCase , )-> Union[str, Any]:
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
UpperCAmelCase__ : Dict = SwinConfig(
image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : str = backbone_config.pop("model_type" )
UpperCAmelCase__ : Dict = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase__ : Tuple = config_class.from_dict(__UpperCamelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
F"Supported model types: {','.join(self.backbones_supported )}" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
UpperCAmelCase__ : Tuple = DetrConfig()
else:
# verify that the decoder is supported
UpperCAmelCase__ : List[Any] = (
decoder_config.pop("model_type" ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F"Transformer Decoder {decoder_type} not supported, please use one of"
F" {','.join(self.decoders_supported )}" )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCAmelCase__ : Optional[int] = CONFIG_MAPPING[decoder_type]
UpperCAmelCase__ : Optional[int] = config_class.from_dict(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = backbone_config
UpperCAmelCase__ : Optional[Any] = decoder_config
# main feature dimension for the model
UpperCAmelCase__ : Dict = fpn_feature_size
UpperCAmelCase__ : List[str] = mask_feature_size
# initializer
UpperCAmelCase__ : List[Any] = init_std
UpperCAmelCase__ : List[Any] = init_xavier_std
# Hungarian matcher && loss
UpperCAmelCase__ : Optional[int] = cross_entropy_weight
UpperCAmelCase__ : Union[str, Any] = dice_weight
UpperCAmelCase__ : str = mask_weight
UpperCAmelCase__ : Optional[int] = use_auxiliary_loss
UpperCAmelCase__ : List[str] = no_object_weight
UpperCAmelCase__ : List[Any] = output_auxiliary_logits
UpperCAmelCase__ : Dict = self.decoder_config.encoder_attention_heads
UpperCAmelCase__ : int = self.decoder_config.num_hidden_layers
super().__init__(**__UpperCamelCase )
@classmethod
def lowerCAmelCase__ ( cls , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )-> Optional[Any]:
return cls(
backbone_config=__UpperCamelCase , decoder_config=__UpperCamelCase , **__UpperCamelCase , )
def lowerCAmelCase__ ( self )-> Dict[str, any]:
UpperCAmelCase__ : Dict = copy.deepcopy(self.__dict__ )
UpperCAmelCase__ : Any = self.backbone_config.to_dict()
UpperCAmelCase__ : int = self.decoder_config.to_dict()
UpperCAmelCase__ : Union[str, Any] = self.__class__.model_type
return output
| 660 |
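# --- Editor's addendum ---
# Composing the configuration above from explicit backbone and decoder
# configs via its classmethod (upstream the class is MaskFormerConfig and
# the classmethod is from_backbone_and_decoder_configs; treat both names as
# an assumption about this obfuscated record):
backbone = SwinConfig(image_size=384, embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32])
decoder = DetrConfig()
config = MaskFormerConfig.from_backbone_and_decoder_configs(
    backbone_config=backbone, decoder_config=decoder
)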
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A__ : Dict = logging.get_logger(__name__)
def make_batched(videos ):
    '''simple docstring'''
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(F"Could not make batched video from {videos}" )
class _lowercase ( BaseImageProcessor ):
'''simple docstring'''
_A = ['pixel_values']
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> None:
super().__init__(**__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56}
UpperCAmelCase__ : List[Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
UpperCAmelCase__ : int = get_size_dict(__UpperCamelCase , param_name="crop_size" )
UpperCAmelCase__ : Dict = do_resize
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : List[Any] = do_center_crop
UpperCAmelCase__ : str = crop_size
UpperCAmelCase__ : Optional[int] = resample
UpperCAmelCase__ : int = do_rescale
UpperCAmelCase__ : Union[str, Any] = rescale_factor
UpperCAmelCase__ : Union[str, Any] = offset
UpperCAmelCase__ : Dict = do_normalize
UpperCAmelCase__ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" in size:
UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(__UpperCamelCase , size["shortest_edge"] , default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
UpperCAmelCase__ : Any = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
UpperCAmelCase__ : Optional[Any] = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> Tuple:
UpperCAmelCase__ : str = image.astype(np.floataa )
if offset:
UpperCAmelCase__ : Tuple = image - (scale / 2)
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray:
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , )-> np.ndarray:
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase__ : Optional[Any] = to_numpy_array(__UpperCamelCase )
if do_resize:
UpperCAmelCase__ : Union[str, Any] = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase )
if do_center_crop:
UpperCAmelCase__ : int = self.center_crop(__UpperCamelCase , size=__UpperCamelCase )
if do_rescale:
UpperCAmelCase__ : List[str] = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase , offset=__UpperCamelCase )
if do_normalize:
UpperCAmelCase__ : List[Any] = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase )
UpperCAmelCase__ : Dict = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase )
return image
def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , )-> PIL.Image.Image:
UpperCAmelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase__ : int = resample if resample is not None else self.resample
UpperCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase__ : Optional[int] = offset if offset is not None else self.offset
UpperCAmelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase__ : Dict = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase__ : Optional[int] = image_std if image_std is not None else self.image_std
UpperCAmelCase__ : List[str] = size if size is not None else self.size
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
UpperCAmelCase__ : Dict = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase__ : Tuple = get_size_dict(__UpperCamelCase , param_name="crop_size" )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase__ : List[str] = make_batched(__UpperCamelCase )
UpperCAmelCase__ : Optional[Any] = [
[
self._preprocess_image(
image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , offset=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase__ : Dict = {"pixel_values": videos}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 660 | 1 |
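# --- Editor's addendum ---
# What the make_batched helper above normalizes to -- every accepted input
# becomes a batch: a list of videos, each video a list of frames:
import numpy as np

frame = np.zeros((224, 224, 3), dtype=np.uint8)
video = [frame, frame]
batch = [video, video]
# make_batched(frame) -> [[frame]]   (one video with a single frame)
# make_batched(video) -> [video]     (a single video)
# make_batched(batch) -> batch       (already batched, returned unchanged)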
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
_lowerCAmelCase :List[Any] = logging.getLogger()
_lowerCAmelCase :Union[str, Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase ( TestCasePlus ):
'''simple docstring'''
    def _create_dummy_data( self , data_dir ) -> List[Any]:
        os.makedirs(data_dir , exist_ok=True )
        contents = {'source': 'What is love ?', 'target': 'life'}
        n_lines = {'train': 12, 'val': 2, 'test': 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = '\n'.join([contents[field]] * n_lines[split] )
                with open(os.path.join(data_dir , F"""{split}.{field}""" ) , 'w' ) as f:
                    f.write(content )
def _UpperCamelCase ( self , lowercase__ , lowercase__ = "pytorch" ) -> List[str]:
SCREAMING_SNAKE_CASE : Any = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE : Dict = os.path.join(a__ , 'output' )
SCREAMING_SNAKE_CASE : Any = os.path.join(a__ , 'data' )
self._create_dummy_data(data_dir=a__ )
SCREAMING_SNAKE_CASE : List[str] = F"""\n --data_dir {data_dir} \\n --output_dir {output_dir} \\n --model_name_or_path facebook/rag-sequence-base \\n --model_type rag_sequence \\n --do_train \\n --do_predict \\n --n_val -1 \\n --val_check_interval 1.0 \\n --train_batch_size 2 \\n --eval_batch_size 1 \\n --max_source_length 25 \\n --max_target_length 25 \\n --val_max_target_length 25 \\n --test_max_target_length 25 \\n --label_smoothing 0.1 \\n --dropout 0.1 \\n --attention_dropout 0.1 \\n --weight_decay 0.001 \\n --adam_epsilon 1e-08 \\n --max_grad_norm 0.1 \\n --lr_scheduler polynomial \\n --learning_rate 3e-04 \\n --num_train_epochs 1 \\n --warmup_steps 4 \\n --gradient_accumulation_steps 1 \\n --distributed-port 8787 \\n --use_dummy_dataset 1 \\n --distributed_retriever {distributed_retriever} \\n """.split()
if gpus > 0:
testargs.append(F"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
SCREAMING_SNAKE_CASE : Union[str, Any] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(a__ , env=self.get_env() )
SCREAMING_SNAKE_CASE : Dict = os.path.join(a__ , 'metrics.json' )
with open(a__ ) as f:
SCREAMING_SNAKE_CASE : List[str] = json.load(a__ )
return result
@require_torch_gpu
def _UpperCamelCase ( self ) -> Optional[int]:
        result = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
def _UpperCamelCase ( self ) -> List[Any]:
        result = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_gpu
@require_ray
def _UpperCamelCase ( self ) -> str:
        result = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _UpperCamelCase ( self ) -> str:
        result = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
| 251 |
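# --- Editor's addendum ---
# _create_dummy_data above materializes the seq2seq layout the RAG finetune
# script expects: {train,val,test}.{source,target}, one example per line, e.g.
#   data/train.source -> "What is love ?" repeated 12 times
#   data/train.target -> "life" repeated 12 times
#   data/val.*        -> 2 lines each, data/test.* -> 2 lines each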
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : List[Any] = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[int] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : List[Any] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Dict = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 400 | 0 |
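# --- Editor's addendum ---
# Both import files in this section use the transformers lazy-import layout:
# a plain dict mapping submodules to exported names, materialized only on
# attribute access. A minimal sketch of the mechanism (a hypothetical
# simplification of _LazyModule; the real implementation also handles dir(),
# pickling, and optional-dependency stubs):
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        # import the owning submodule on first access, then fetch the symbol
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)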
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 707 |
def z_function(input_str: str ):
    """simple docstring"""
    z_result = [0 for i in range(len(input_str ) )]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1 , len(input_str ) ):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1 , z_result[i - left_pointer] )
            z_result[i] = min_edge
        while go_next(i , z_result , input_str ):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next(i: int , z_result: list[int] , s: str ):
    """simple docstring"""
    return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str , input_str: str ):
    """simple docstring"""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str )
    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern ):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 382 | 0 |
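# --- Editor's addendum ---
# Hand-checkable sanity values for the z-function above. Note that
# find_pattern concatenates pattern + text with no sentinel between them;
# inserting a separator character that occurs in neither string is the
# usual hardening against pathological overlaps.
assert z_function("aaaa") == [0, 3, 2, 1]
assert z_function("abacaba") == [0, 0, 1, 0, 3, 0, 1]
assert find_pattern("aba", "abacaba") == 2  # matches at offsets 0 and 4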
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase=False ):
__lowercase : str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__lowercase : Tuple = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
__lowercase : Optional[int] = ''''''
else:
__lowercase : Any = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowercase : str = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
__lowercase : Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowercase : Optional[int] = in_proj_weight[
: config.hidden_size, :
]
__lowercase : int = in_proj_bias[: config.hidden_size]
__lowercase : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowercase : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowercase : Tuple = in_proj_weight[
-config.hidden_size :, :
]
__lowercase : str = in_proj_bias[-config.hidden_size :]
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase : Optional[int] = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__UpperCamelCase , __UpperCamelCase )
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__lowercase : Any = dct.pop(__UpperCamelCase )
__lowercase : Optional[Any] = val
def __UpperCAmelCase ( ):
__lowercase : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__lowercase : Any = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
__lowercase : Union[str, Any] = ViTConfig()
__lowercase : Optional[Any] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
__lowercase : Tuple = True
__lowercase : int = int(vit_name[-12:-10] )
__lowercase : str = int(vit_name[-9:-6] )
else:
__lowercase : Union[str, Any] = 10_00
__lowercase : Any = '''huggingface/label-files'''
__lowercase : str = '''imagenet-1k-id2label.json'''
__lowercase : int = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) )
__lowercase : Any = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
__lowercase : str = idalabel
__lowercase : Optional[Any] = {v: k for k, v in idalabel.items()}
__lowercase : Dict = int(vit_name[-6:-4] )
__lowercase : List[Any] = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith('''tiny''' ):
__lowercase : Union[str, Any] = 1_92
__lowercase : int = 7_68
__lowercase : Optional[Any] = 12
__lowercase : Optional[Any] = 3
elif vit_name[9:].startswith('''small''' ):
__lowercase : Optional[int] = 3_84
__lowercase : List[Any] = 15_36
__lowercase : Optional[Any] = 12
__lowercase : str = 6
else:
pass
else:
if vit_name[4:].startswith('''small''' ):
__lowercase : Dict = 7_68
__lowercase : List[str] = 23_04
__lowercase : Union[str, Any] = 8
__lowercase : Dict = 8
elif vit_name[4:].startswith('''base''' ):
pass
elif vit_name[4:].startswith('''large''' ):
__lowercase : Optional[int] = 10_24
__lowercase : Union[str, Any] = 40_96
__lowercase : Union[str, Any] = 24
__lowercase : Tuple = 16
elif vit_name[4:].startswith('''huge''' ):
__lowercase : str = 12_80
__lowercase : Tuple = 51_20
__lowercase : Union[str, Any] = 32
__lowercase : Any = 16
# load original model from timm
__lowercase : Union[str, Any] = timm.create_model(__UpperCamelCase , pretrained=__UpperCamelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__lowercase : Optional[int] = timm_model.state_dict()
if base_model:
remove_classification_head_(__UpperCamelCase )
__lowercase : int = create_rename_keys(__UpperCamelCase , __UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
read_in_q_k_v(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
__lowercase : Dict = ViTModel(__UpperCamelCase ).eval()
else:
__lowercase : Any = ViTForImageClassification(__UpperCamelCase ).eval()
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
__lowercase : Optional[int] = DeiTImageProcessor(size=config.image_size )
else:
__lowercase : int = ViTImageProcessor(size=config.image_size )
__lowercase : str = image_processor(images=prepare_img() , return_tensors='''pt''' )
__lowercase : str = encoding['''pixel_values''']
__lowercase : Dict = model(__UpperCamelCase )
if base_model:
__lowercase : Tuple = timm_model.forward_features(__UpperCamelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__UpperCamelCase , outputs.pooler_output , atol=1e-3 )
else:
__lowercase : Any = timm_model(__UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCamelCase , outputs.logits , atol=1e-3 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
a_ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 76 |
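# --- Editor's addendum ---
# Example invocation of the conversion script above (the script file name and
# the output directory are illustrative; the flags come from the argparse
# setup shown):
#   python convert_vit_timm_to_pytorch.py \
#       --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit-base-patch16-224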
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape ,scale=1.0 ,rng=None ,name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, do_normalize=True, num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window", fmin=80, fmax=7600, mel_floor=1e-10, return_attention_mask=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np")
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def test_call_target(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))
@require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))
@require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np")
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length])
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs])

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
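# A hedged end-to-end sketch of the two code paths the tests above exercise
# (shapes are illustrative; the extractor defaults to 16 kHz audio):
#
#   import numpy as np
#   from transformers import SpeechT5FeatureExtractor
#
#   extractor = SpeechT5FeatureExtractor()
#   wave = np.random.randn(16000).astype(np.float32)             # 1 s of fake audio
#   inputs = extractor(wave, sampling_rate=16000, return_tensors="pt")
#   # inputs.input_values -> (1, 16000) zero-mean/unit-variance raw values
#   targets = extractor(audio_target=wave, return_tensors="pt")
#   # targets.input_values -> (1, frames, 80) log-mel filter-bank features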
| 171 | 0 |
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 712 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_DESCRIPTION = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_KWARGS_DESCRIPTION = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n    predictions: list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references: list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    max_order: Maximum n-gram order to use when computing BLEU score.\n    smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n    'bleu': bleu score,\n    'precisions': geometric mean of n-gram precisions,\n    'brevity_penalty': brevity penalty,\n    'length_ratio': ratio of lengths,\n    'translation_length': translation_length,\n    'reference_length': reference_length\nExamples:\n\n    >>> predictions = [\n    ...     [\"hello\", \"there\", \"general\", \"kenobi\"],  # tokenized prediction of the first sample\n    ...     [\"foo\", \"bar\", \"foobar\"]  # tokenized prediction of the second sample\n    ... ]\n    >>> references = [\n    ...     [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]],  # tokenized references for the first sample (2 references)\n    ...     [[\"foo\", \"bar\", \"foobar\"]]  # tokenized references for the second sample (1 reference)\n    ... ]\n    >>> bleu = datasets.load_metric(\"bleu\")\n    >>> results = bleu.compute(predictions=predictions, references=references)\n    >>> print(results[\"bleu\"])\n    1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
"""simple docstring"""
    def _info(self):
        '''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        '''simple docstring'''
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth)
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
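# For reference, a sketch of the corpus-level BLEU the wrapped nmt script
# computes (p_n: modified n-gram precisions, uniform weights w_n = 1/N):
#
#   BP   = 1 if c > r else exp(1 - r / c)                 # brevity penalty
#   BLEU = BP * exp(sum(w_n * log(p_n) for n in 1..N))
#
# with c the total candidate length and r the effective reference length.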
| 5 | 0 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 79 |
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    '''Replaces the last `occurrence` occurrences of `old` in `s` with `new`.'''
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    '''simple docstring'''
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    '''simple docstring'''
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    '''simple docstring'''
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1E-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
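# Hedged invocation sketch (script name, URL and paths are illustrative; the
# checkpoint must be a DALL-E dVAE encoder that `dall_e.Encoder` can load):
#
#   python convert_dalle_to_flava_codebook.py \
#       --checkpoint_path https://cdn.openai.com/dall-e/encoder.pkl \
#       --pytorch_dump_folder_path ./flava-image-codebook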
| 79 | 1 |
# flake8: noqa
# Lint as: python3
__all__ = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 448 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    '''simple docstring'''
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
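# Hedged invocation sketch (script name and paths are illustrative):
#
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bigbird/model.ckpt \
#       --big_bird_config_file ./bigbird/config.json \
#       --pytorch_dump_path ./bigbird-pytorch \
#       --is_trivia_qa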
| 448 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
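# What the lazy pattern above buys (sketch): importing the package is cheap, and
# the heavy torch/tf submodules are only imported when a name is first accessed.
#
#   from transformers.models.speech_to_text import Speech2TextConfig  # no torch yet
#   from transformers.models.speech_to_text import Speech2TextModel   # triggers torch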
| 232 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module):
    """Disables gradient updates for every parameter of `module`."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Picks the best available torch device (cuda > mps > cpu)."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    """Displays `image` with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    """Returns the current time as an HH:MM:SS string."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
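# Usage sketch for the helpers above (`model` is a hypothetical nn.Module; the
# function names were restored descriptively from the mangled originals):
#
#   device = get_device()
#   freeze_params(model)            # disables gradients on every parameter
#   print(get_timestamp())          # e.g. "14:03:59"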
| 43 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs):
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
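# Hedged usage sketch of the combined processor above (the hub id and image are
# illustrative assumptions):
#
#   from PIL import Image
#   from transformers import BridgeTowerProcessor
#
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   image = Image.new("RGB", (288, 288))
#   batch = processor(image, "a photo", return_tensors="pt")
#   # batch holds the tokenizer outputs plus pixel_values (and, depending on
#   # the image-processor config, a pixel_mask) ready for the model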
| 707 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex):
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex, to_vertex, weight):
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex, finish_vertex):
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # weight-0 edges go to the front of the deque, weight-1 to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
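# Usage sketch for the 0-1 BFS above (edges and weights are illustrative):
#
#   g = AdjacencyList(4)
#   g.add_edge(0, 1, 0)      # free edge, explored first via appendleft
#   g.add_edge(1, 2, 1)
#   g.add_edge(0, 3, 1)
#   g.add_edge(3, 2, 0)
#   assert g.get_shortest_path(0, 2) == 1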
| 254 | 0 |
"""simple docstring"""
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1E-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
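# Sketch of the scoring idea in forward() above: for query i, every token
# embedding q[i][t] is matched against the support tokens tagged as entity
# starts/ends, and the summed similarities are softmaxed over query positions:
#
#   p_start[t] ∝ exp(sum_s q[i][t] · s_start[s])
#   p_end[t]   ∝ exp(sum_s q[i][t] · s_end[s])
#
# so p_start/p_end are per-token probabilities of being an entity boundary.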
| 83 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
'''simple docstring'''
    def setUp(self):
        '''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'external_deps', 'test_metrics.py'])

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics
@require_cpu
    def test_metric_cpu_noop(self):
        '''simple docstring'''
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        '''simple docstring'''
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        '''simple docstring'''
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        '''simple docstring'''
        print(f'''Found {torch.cuda.device_count()} devices.''')
        cmd = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 42 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 238 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """simple docstring"""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({'content': datasets.Value('string')}), supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'examples': get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """simple docstring"""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string')})}), supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'examples': get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'])]


class BeamBuilderTest(TestCase):
"""simple docstring"""
@require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', f"""{builder.name}-train.arrow""")))
            self.assertDictEqual(builder.info.features, datasets.Features({'content': datasets.Value('string')}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            self.assertDictEqual(dset['train'][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json')))
            del dset
@require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            with patch('apache_beam.io.parquetio.WriteToParquet') as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, 'default', '0.0.0', f"""{builder.name}-train-00000-of-00002.arrow""")))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, 'default', '0.0.0', f"""{builder.name}-train-00001-of-00002.arrow""")))
            self.assertDictEqual(builder.info.features, datasets.Features({'content': datasets.Value('string')}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['train']['content']), sorted(['foo', 'bar', 'foobar']))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json')))
            del dset
@require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)
@require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner='DirectRunner')
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', f"""{builder.name}-train.arrow""")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string')})}))
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows, expected_num_examples)
            self.assertEqual(dset['train'].info.splits['train'].num_examples, expected_num_examples)
            self.assertDictEqual(dset['train'][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, 'default', '0.0.0', 'dataset_info.json')))
            del dset
| 238 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",), up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int, ...] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18215, norm_type: str = "group"):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False)

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type)

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant, quant if self.config.norm_type == 'spatial' else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
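# Hedged round-trip sketch for the VQ model above (default config; shapes are
# illustrative assumptions):
#
#   model = VQModel()
#   image = torch.randn(1, 3, 32, 32)
#   latents = model.encode(image).latents      # continuous pre-quantization latents
#   recon = model.decode(latents).sample       # quantize -> decode back to pixels
#   assert recon.shape == image.shape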
| 298 |
'''simple docstring'''
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows()
| 116 | 0 |
"""simple docstring"""
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """Greedy approximation algorithm for minimum vertex cover."""
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    #   (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 112 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i

        if max_n == 0:
            return temp

        temp.append(prime_implicants[rem])

        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    essential_prime_implicants_chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(essential_prime_implicants_chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
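# Non-interactive sketch of the pipeline main() drives (3 variables, minterms
# 1, 3 and 7). Note main() casts minterms to float, so the "binary" strings
# carry float digits (e.g. '1.0' per bit) rather than plain 0/1:
#
#   binary = decimal_to_binary(3, [1.0, 3.0, 7.0])
#   primes = check(binary)
#   chart = prime_implicant_chart(primes, binary)
#   essentials = selection(chart, primes)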
| 112 | 1 |
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def snake_case_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
__lowercase = 0
__lowercase = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(_SCREAMING_SNAKE_CASE ):
index += 1
__lowercase = ["\n".join(lines[:index] )]
else:
__lowercase = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__lowercase = [lines[index]]
index += 1
while index < len(_SCREAMING_SNAKE_CASE ) and (end_prompt is None or not lines[index].startswith(_SCREAMING_SNAKE_CASE )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_SCREAMING_SNAKE_CASE ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(_SCREAMING_SNAKE_CASE ) )
if index < len(_SCREAMING_SNAKE_CASE ) - 1:
__lowercase = [lines[index + 1]]
index += 1
else:
__lowercase = []
else:
blocks.append("\n".join(_SCREAMING_SNAKE_CASE ) )
__lowercase = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_SCREAMING_SNAKE_CASE ) > 0:
blocks.append("\n".join(_SCREAMING_SNAKE_CASE ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_SCREAMING_SNAKE_CASE ):
blocks.append("\n".join(lines[index:] ) )
return blocks
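# Illustration of the splitter above on a toy module body (hypothetical input):
#
#   code = 'import os\n_import_structure = {\n    "a": [],\n}\nif TYPE_CHECKING:\n    pass'
#   blocks = split_code_in_indented_blocks(
#       code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
#   )
#   # blocks[0] is everything before start_prompt; the last block starts at end_prompt.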
def ignore_underscore(key):
    """Wraps a `key` function (mapping an object to a str) to lowercase and drop underscores."""
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
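# Illustrative example (added): constants, then classes, then functions, each
# bucket sorted case-insensitively while ignoring underscores:
#   sort_objects(["my_func", "MyClass", "MY_CONST"])
#   == ["MY_CONST", "MyClass", "my_func"]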
def sort_objects_in_import(import_statement):
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
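# One-line case example (added):
#   sort_objects_in_import('_import_structure["models"] = ["ZModel", "AConfig"]')
#   returns '_import_structure["models"] = ["AConfig", "ZModel"]'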
def sort_imports(file, check_only=True):
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks.
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                # collect every offending init, not just the last one
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
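# Typical invocation (added note; the file name is an assumption for illustration):
#   python utils/custom_init_isort.py               # rewrite offending __init__.py files in place
#   python utils/custom_init_isort.py --check_only  # only report, raising if anything would change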
| 402 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
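# Added note: with this pattern, importing the package does not pull in torch
# or the modeling file up front; _LazyModule resolves names listed in
# `_import_structure` on first attribute access, keeping `import transformers`
# fast even when optional backends are installed.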
| 402 | 1 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
def lowerCamelCase__ ( self : Union[str, Any] , **__snake_case : str ) -> Optional[Any]:
__magic_name__: Union[str, Any] = {
"num_train_timesteps": 1_0_0_0,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**__lowerCamelCase )
return config
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : List[str]=0 , **__snake_case : Any ) -> Tuple:
__magic_name__: Dict = dict(self.forward_default_kwargs )
__magic_name__: Dict = kwargs.pop("""num_inference_steps""" , __lowerCamelCase )
__magic_name__: Union[str, Any] = self.dummy_sample
__magic_name__: List[Any] = 0.1 * sample
__magic_name__: List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__magic_name__: Any = self.get_scheduler_config(**__lowerCamelCase )
__magic_name__: int = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
__magic_name__: Tuple = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
__magic_name__: Dict = scheduler_class.from_pretrained(__lowerCamelCase )
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals
__magic_name__: Tuple = dummy_past_residuals[:]
__magic_name__: Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
__magic_name__: str = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__magic_name__: int = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
__magic_name__: List[str] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase__ ( self : List[Any] ) -> Union[str, Any]:
pass
def lowerCamelCase__ ( self : List[str] , __snake_case : Optional[Any]=0 , **__snake_case : Tuple ) -> str:
__magic_name__: List[str] = dict(self.forward_default_kwargs )
__magic_name__: Optional[int] = kwargs.pop("""num_inference_steps""" , __lowerCamelCase )
__magic_name__: List[str] = self.dummy_sample
__magic_name__: Any = 0.1 * sample
__magic_name__: Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__magic_name__: Tuple = self.get_scheduler_config()
__magic_name__: Optional[int] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residuals (must be after setting timesteps)
__magic_name__: Optional[int] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCamelCase )
__magic_name__: str = scheduler_class.from_pretrained(__lowerCamelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowerCamelCase )
# copy over dummy past residual (must be after setting timesteps)
__magic_name__: Optional[Any] = dummy_past_residuals[:]
__magic_name__: Union[str, Any] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
__magic_name__: Dict = new_scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__magic_name__: Union[str, Any] = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
__magic_name__: List[Any] = new_scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase__ ( self : Tuple , **__snake_case : Any ) -> Union[str, Any]:
__magic_name__: Optional[Any] = self.scheduler_classes[0]
__magic_name__: List[Any] = self.get_scheduler_config(**__lowerCamelCase )
__magic_name__: str = scheduler_class(**__lowerCamelCase )
__magic_name__: List[str] = 1_0
__magic_name__: Union[str, Any] = self.dummy_model()
__magic_name__: int = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCamelCase )
for i, t in enumerate(scheduler.prk_timesteps ):
__magic_name__: Optional[int] = model(__lowerCamelCase , __lowerCamelCase )
__magic_name__: Optional[int] = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
__magic_name__: Tuple = model(__lowerCamelCase , __lowerCamelCase )
__magic_name__: Tuple = scheduler.step_plms(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
return sample
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
__magic_name__: Union[str, Any] = dict(self.forward_default_kwargs )
__magic_name__: Union[str, Any] = kwargs.pop("""num_inference_steps""" , __lowerCamelCase )
for scheduler_class in self.scheduler_classes:
__magic_name__: Dict = self.get_scheduler_config()
__magic_name__: List[str] = scheduler_class(**__lowerCamelCase )
__magic_name__: List[Any] = self.dummy_sample
__magic_name__: List[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(__lowerCamelCase , """set_timesteps""" ):
scheduler.set_timesteps(__lowerCamelCase )
elif num_inference_steps is not None and not hasattr(__lowerCamelCase , """set_timesteps""" ):
__magic_name__: List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__magic_name__: List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__magic_name__: Tuple = dummy_past_residuals[:]
__magic_name__: Dict = scheduler.step_prk(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
__magic_name__: List[Any] = scheduler.step_prk(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__magic_name__: Any = scheduler.step_plms(__lowerCamelCase , 0 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
__magic_name__: str = scheduler.step_plms(__lowerCamelCase , 1 , __lowerCamelCase , **__lowerCamelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCamelCase__ ( self : str ) -> Tuple:
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__lowerCamelCase )
__magic_name__: Dict = self.scheduler_classes[0]
__magic_name__: Union[str, Any] = self.get_scheduler_config(steps_offset=1 )
__magic_name__: Optional[int] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(1_0 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) , )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Dict:
for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ):
self.check_over_configs(beta_start=__lowerCamelCase , beta_end=__lowerCamelCase )
def lowerCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCamelCase )
def lowerCamelCase__ ( self : Any ) -> str:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def lowerCamelCase__ ( self : List[str] ) -> List[Any]:
for t in [1, 5, 1_0]:
self.check_over_forward(time_step=__lowerCamelCase )
def lowerCamelCase__ ( self : Tuple ) -> int:
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=__lowerCamelCase )
def lowerCamelCase__ ( self : Optional[int] ) -> Any:
# earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
__magic_name__: str = 2_7
for scheduler_class in self.scheduler_classes:
__magic_name__: Tuple = self.dummy_sample
__magic_name__: List[Any] = 0.1 * sample
__magic_name__: List[Any] = self.get_scheduler_config()
__magic_name__: List[str] = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(__lowerCamelCase )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
__magic_name__: Dict = scheduler.step_prk(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).prev_sample
def lowerCamelCase__ ( self : str ) -> int:
with self.assertRaises(__lowerCamelCase ):
__magic_name__: Union[str, Any] = self.scheduler_classes[0]
__magic_name__: Union[str, Any] = self.get_scheduler_config()
__magic_name__: List[str] = scheduler_class(**__lowerCamelCase )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def lowerCamelCase__ ( self : List[str] ) -> Dict:
__magic_name__: Optional[Any] = self.full_loop()
__magic_name__: Tuple = torch.sum(torch.abs(__lowerCamelCase ) )
__magic_name__: Optional[int] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 198.1318 ) < 1E-2
assert abs(result_mean.item() - 0.2580 ) < 1E-3
def lowerCamelCase__ ( self : List[str] ) -> Any:
__magic_name__: Any = self.full_loop(prediction_type="""v_prediction""" )
__magic_name__: Union[str, Any] = torch.sum(torch.abs(__lowerCamelCase ) )
__magic_name__: List[str] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 67.3986 ) < 1E-2
assert abs(result_mean.item() - 0.0878 ) < 1E-3
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
# We specify different beta, so that the first alpha is 0.99
__magic_name__: Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 )
__magic_name__: Dict = torch.sum(torch.abs(__lowerCamelCase ) )
__magic_name__: Any = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 230.0399 ) < 1E-2
assert abs(result_mean.item() - 0.2995 ) < 1E-3
def lowerCamelCase__ ( self : int ) -> Any:
# We specify different beta, so that the first alpha is 0.99
__magic_name__: Any = self.full_loop(set_alpha_to_one=__lowerCamelCase , beta_start=0.01 )
__magic_name__: List[Any] = torch.sum(torch.abs(__lowerCamelCase ) )
__magic_name__: Optional[Any] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 186.9482 ) < 1E-2
assert abs(result_mean.item() - 0.2434 ) < 1E-3
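# Minimal usage sketch of the scheduler under test (added; relies only on the
# public diffusers API imported above):
#
#   scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   scheduler.set_timesteps(50)
#   # A denoising loop calls scheduler.step_prk(...) for the Runge-Kutta
#   # warm-up timesteps, then scheduler.step_plms(...) for the remaining
#   # linear multistep timesteps, mirroring the full-loop helper above.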
| 717 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source: float, target: float) -> bool:
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2_3_5_1_5_6_3,
"""num_examples""": 1_0_0_0_0,
},
{
"""name""": """validation""",
"""num_bytes""": 2_3_8_4_1_8,
"""num_examples""": 1_0_0_0,
},
] , download_size=3_9_4_0_6_8_0 , dataset_size=2_5_8_9_9_8_1 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 213 | 0 |
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
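# Sanity check (added note): both qubits are deterministically flipped to |1>
# by the X gates, so every one of the 1000 shots should be measured as '11';
# the printout below should therefore read {'11': 1000}.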
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f'''Total count for various states are: {counts}''')
| 365 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
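# Typical invocations once installed as the `accelerate` entry point (added note):
#   accelerate config                      # interactive environment setup
#   accelerate env                         # print environment info
#   accelerate launch train.py --arg val   # run a script with the saved config
#   accelerate test                        # sanity-check the saved config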
| 365 | 1 |
def solution(limit: int = 50000000) -> int:
    """Count the integers below `limit` expressible as the sum of a prime
    square, a prime cube and a prime fourth power (Project Euler 87)."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for primea in primes:
        square = primea * primea
        for primeb in primes:
            cube = primeb * primeb * primeb
            if square + cube >= limit - 16:
                break
            for primec in primes:
                tetr = primec * primec * primec * primec
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
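# Worked example (added): 28 = 2**2 + 2**3 + 2**4 is the smallest such number,
# and per the Project Euler 87 statement exactly four numbers below fifty can
# be written this way.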
if __name__ == "__main__":
print(F'{solution() = }')
| 149 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 149 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
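# Minimal usage sketch (added; mirrors the standard transformers config API):
#   from transformers import ASTConfig, ASTModel
#   config = ASTConfig(num_mel_bins=128, max_length=1024)
#   model = ASTModel(config)  # randomly initialised audio spectrogram transformer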
| 100 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
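# Added note: the fused `qkv` projection stacks Q, K and V along dim 0, so its
# weight has shape (3 * dim, dim); rows [0, dim) are Q, rows [dim, 2*dim) are K
# and the last `dim` rows are V, which is exactly how the slices above carve it up.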
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:config.hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
# fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""maskformer-swin-tiny-ade""",
        type=str,
        help="""Name of the MaskFormer model you'd like to convert""",
    )
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 100 | 1 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
def __init__( self ):
__UpperCAmelCase = False
def __lowerCamelCase ( self , __A , __A , __A , __A ):
if not self.initialized:
__UpperCAmelCase = RagRetriever(
__A , question_encoder_tokenizer=__A , generator_tokenizer=__A , index=__A , init_retrieval=__A , )
__UpperCAmelCase = True
def __lowerCamelCase ( self ):
self.retriever.index.init_index()
def __lowerCamelCase ( self , __A , __A ):
__UpperCAmelCase = self.retriever._main_retrieve(__A , __A )
return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
def __init__( self , __A , __A , __A , __A , __A=None ):
if index is not None and index.is_initialized() and len(__A ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
__A , question_encoder_tokenizer=__A , generator_tokenizer=__A , index=__A , init_retrieval=__A , )
__UpperCAmelCase = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(__A , __A , __A , __A )
for worker in self.retrieval_workers
] )
def __lowerCamelCase ( self ):
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __lowerCamelCase ( self , __A , __A ):
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__UpperCAmelCase = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__UpperCAmelCase = ray.get(random_worker.retrieve.remote(__A , __A ) )
else:
__UpperCAmelCase = self._main_retrieve(__A , __A )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__A )
@classmethod
def __lowerCamelCase ( cls , __A , __A=None , **__A ):
return super(__A , cls ).get_tokenizers(__A , __A , **__A )
@classmethod
def __lowerCamelCase ( cls , __A , __A , __A=None , **__A ):
__UpperCAmelCase = kwargs.pop('config' , __A ) or RagConfig.from_pretrained(__A , **__A )
__UpperCAmelCase = RagTokenizer.from_pretrained(__A , config=__A )
__UpperCAmelCase = rag_tokenizer.question_encoder
__UpperCAmelCase = rag_tokenizer.generator
if indexed_dataset is not None:
__UpperCAmelCase = '''custom'''
__UpperCAmelCase = CustomHFIndex(config.retrieval_vector_size , __A )
else:
__UpperCAmelCase = cls._build_index(__A )
return cls(
__A , question_encoder_tokenizer=__A , generator_tokenizer=__A , retrieval_workers=__A , index=__A , )
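# Rough wiring sketch (added; names follow the research example this class is
# drawn from, so treat the argument names and worker count as assumptions):
#
#   import ray
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(4)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", workers
#   )
#   retriever.init_retrieval()  # each actor initialises the shared index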
| 719 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=128 , __A=32 , __A=16 , __A=2 , __A=0.0_2 , __A=3 , __A=4 , __A=None , ):
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_input_mask
__UpperCAmelCase = use_token_type_ids
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = type_sequence_label_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = num_labels
__UpperCAmelCase = num_choices
__UpperCAmelCase = scope
def __lowerCamelCase ( self ):
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
if self.use_token_type_ids:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self ):
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) = self.prepare_config_and_inputs()
__UpperCAmelCase = True
__UpperCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = NezhaModel(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A )
__UpperCAmelCase = model(__A , token_type_ids=__A )
__UpperCAmelCase = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
__UpperCAmelCase = True
__UpperCAmelCase = NezhaModel(__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(
__A , attention_mask=__A , token_type_ids=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )
__UpperCAmelCase = model(
__A , attention_mask=__A , token_type_ids=__A , encoder_hidden_states=__A , )
__UpperCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = NezhaForMaskedLM(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = NezhaForNextSentencePrediction(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = NezhaForPreTraining(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , next_sentence_label=__A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = NezhaForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(
__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = NezhaForSequenceClassification(__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = NezhaForTokenClassification(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A ):
__UpperCAmelCase = self.num_choices
__UpperCAmelCase = NezhaForMultipleChoice(config=__A )
model.to(__A )
model.eval()
__UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.prepare_config_and_inputs()
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) = config_and_inputs
__UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": NezhaModel,
"""fill-mask""": NezhaForMaskedLM,
"""question-answering""": NezhaForQuestionAnswering,
"""text-classification""": NezhaForSequenceClassification,
"""token-classification""": NezhaForTokenClassification,
"""zero-shot""": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__A )
def __lowerCamelCase ( self ):
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__A )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*__A )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
@slow
def __lowerCamelCase ( self ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase = NezhaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@slow
@require_torch_gpu
def __lowerCamelCase ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , 'bert.pt' ) )
                loaded = torch.jit.load(os.path.join(tmp , 'bert.pt' ) , map_location=torch_device )
                loaded(inputs_dict['input_ids'].to(torch_device ) , inputs_dict['attention_mask'].to(torch_device ) )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
@slow
def __lowerCamelCase ( self ):
__UpperCAmelCase = NezhaModel.from_pretrained('sijunhe/nezha-cn-base' )
__UpperCAmelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__UpperCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__UpperCAmelCase = model(__A , attention_mask=__A )[0]
__UpperCAmelCase = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , __A )
__UpperCAmelCase = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1E-4 ) )
@slow
def __lowerCamelCase ( self ):
__UpperCAmelCase = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' )
__UpperCAmelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__UpperCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__UpperCAmelCase = model(__A , attention_mask=__A )[0]
__UpperCAmelCase = torch.Size((1, 6, 21_128) )
self.assertEqual(output.shape , __A )
__UpperCAmelCase = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1E-4 ) )
| 617 | 0 |
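The TorchScript test above traces a Nezha model, saves the trace, and reloads it on a target device. Below is a minimal, self-contained sketch of the same trace/save/load round-trip, using a toy module rather than a Nezha checkpoint (the module definition and tensor shapes are illustrative assumptions, not part of the original test):

import os
import tempfile

import torch
from torch import nn


class ToyEncoder(nn.Module):
    # A stand-in for the real model: embed token ids, project, and gate by the mask.
    def __init__(self, vocab_size: int = 100, hidden_size: int = 16):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, hidden_size)
        self.proj = nn.Linear(hidden_size, hidden_size)

    def forward(self, input_ids: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
        hidden = self.embed(input_ids)
        return self.proj(hidden) * attention_mask.unsqueeze(-1)


model = ToyEncoder().eval()
input_ids = torch.randint(0, 100, (1, 6))
attention_mask = torch.ones(1, 6)

traced = torch.jit.trace(model, (input_ids, attention_mask))
with tempfile.TemporaryDirectory() as tmp:
    torch.jit.save(traced, os.path.join(tmp, "toy.pt"))
    loaded = torch.jit.load(os.path.join(tmp, "toy.pt"), map_location="cpu")
    assert loaded(input_ids, attention_mask).shape == (1, 6, 16)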
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : int = {
'''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''',
}
# fmt: off
__lowercase : Tuple = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
__lowercase : Dict = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class lowerCAmelCase ( _A ):
"""simple docstring"""
__lowercase :Tuple = "whisper"
__lowercase :int = ["past_key_values"]
__lowercase :Any = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , UpperCamelCase__=51_865 , UpperCamelCase__=80 , UpperCamelCase__=6 , UpperCamelCase__=4 , UpperCamelCase__=6 , UpperCamelCase__=4 , UpperCamelCase__=1_536 , UpperCamelCase__=1_536 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=50_257 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__="gelu" , UpperCamelCase__=256 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.02 , UpperCamelCase__=False , UpperCamelCase__=1_500 , UpperCamelCase__=448 , UpperCamelCase__=50_256 , UpperCamelCase__=50_256 , UpperCamelCase__=50_256 , UpperCamelCase__=None , UpperCamelCase__=[220, 50_256] , UpperCamelCase__=False , UpperCamelCase__=256 , UpperCamelCase__=False , UpperCamelCase__=0.05 , UpperCamelCase__=10 , UpperCamelCase__=2 , UpperCamelCase__=0.0 , UpperCamelCase__=10 , UpperCamelCase__=0 , UpperCamelCase__=7 , **UpperCamelCase__ , ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = vocab_size
lowerCamelCase_ = num_mel_bins
lowerCamelCase_ = d_model
lowerCamelCase_ = encoder_layers
lowerCamelCase_ = encoder_attention_heads
lowerCamelCase_ = decoder_layers
lowerCamelCase_ = decoder_attention_heads
lowerCamelCase_ = decoder_ffn_dim
lowerCamelCase_ = encoder_ffn_dim
lowerCamelCase_ = dropout
lowerCamelCase_ = attention_dropout
lowerCamelCase_ = activation_dropout
lowerCamelCase_ = activation_function
lowerCamelCase_ = init_std
lowerCamelCase_ = encoder_layerdrop
lowerCamelCase_ = decoder_layerdrop
lowerCamelCase_ = use_cache
lowerCamelCase_ = encoder_layers
lowerCamelCase_ = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase_ = max_source_positions
lowerCamelCase_ = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
lowerCamelCase_ = classifier_proj_size
lowerCamelCase_ = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase_ = apply_spec_augment
lowerCamelCase_ = mask_time_prob
lowerCamelCase_ = mask_time_length
lowerCamelCase_ = mask_time_min_masks
lowerCamelCase_ = mask_feature_prob
lowerCamelCase_ = mask_feature_length
lowerCamelCase_ = mask_feature_min_masks
lowerCamelCase_ = median_filter_width
super().__init__(
pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , suppress_tokens=UpperCamelCase__ , begin_suppress_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
class lowerCAmelCase ( _A ):
"""simple docstring"""
@property
def _lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
lowerCamelCase_ = OrderedDict(
[
('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}),
] )
if self.use_past:
lowerCamelCase_ = {0: '''batch'''}
else:
lowerCamelCase_ = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction='''inputs''' )
return common_inputs
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = -1 , UpperCamelCase__ = -1 , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = 22_050 , UpperCamelCase__ = 5.0 , UpperCamelCase__ = 220 , ) -> Mapping[str, Any]:
'''simple docstring'''
lowerCamelCase_ = OrderedDict()
lowerCamelCase_ = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=UpperCamelCase__ , framework=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , time_duration=UpperCamelCase__ , frequency=UpperCamelCase__ , )
lowerCamelCase_ = encoder_inputs['''input_features'''].shape[2]
lowerCamelCase_ = encoder_sequence_length // 2 if self.use_past else seq_length
lowerCamelCase_ = super().generate_dummy_inputs(
preprocessor.tokenizer , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase_ = encoder_inputs.pop('''input_features''' )
lowerCamelCase_ = decoder_inputs.pop('''decoder_input_ids''' )
if "past_key_values" in decoder_inputs:
lowerCamelCase_ = decoder_inputs.pop('''past_key_values''' )
return dummy_inputs
@property
def _lowerCAmelCase ( self ) -> float:
'''simple docstring'''
return 1e-3 | 142 |
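The two classes above are identifier-mangled copies of transformers' WhisperConfig and its ONNX configuration. As a quick sanity sketch against the public API (assuming a standard transformers install), the defaults below are taken straight from the signature above:

from transformers import WhisperConfig

config = WhisperConfig()
assert config.vocab_size == 51_865        # default from the signature above
assert config.num_mel_bins == 80
assert config.d_model == 256
assert config.max_source_positions == 1_500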
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class _UpperCAmelCase :
def __init__( self : Dict , A : Tuple , A : Union[str, Any]=13 , A : Dict=7 , A : Union[str, Any]=True , A : int=True , A : int=False , A : int=True , A : List[Any]=99 , A : List[str]=32 , A : Dict=5 , A : int=4 , A : Union[str, Any]=37 , A : Any="gelu" , A : List[Any]=0.1 , A : Dict=0.1 , A : Optional[Any]=5_12 , A : List[str]=16 , A : Tuple=2 , A : List[Any]=0.02 , A : int=3 , A : Any=4 , A : Tuple=None , ) -> int:
lowercase_ : Optional[int] = parent
lowercase_ : str = batch_size
lowercase_ : Optional[Any] = seq_length
lowercase_ : Optional[int] = is_training
lowercase_ : int = use_input_mask
lowercase_ : List[str] = use_token_type_ids
lowercase_ : Optional[Any] = use_labels
lowercase_ : Union[str, Any] = vocab_size
lowercase_ : Optional[Any] = hidden_size
lowercase_ : List[str] = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : Dict = intermediate_size
lowercase_ : List[str] = hidden_act
lowercase_ : Dict = hidden_dropout_prob
lowercase_ : Optional[Any] = attention_probs_dropout_prob
lowercase_ : Optional[Any] = max_position_embeddings
lowercase_ : List[Any] = type_vocab_size
lowercase_ : Optional[int] = type_sequence_label_size
lowercase_ : Optional[Any] = initializer_range
lowercase_ : List[str] = num_labels
lowercase_ : Optional[int] = num_choices
lowercase_ : List[str] = scope
def A ( self : Dict ) -> Dict:
lowercase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : str = None
if self.use_input_mask:
lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : Any = None
if self.use_token_type_ids:
lowercase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ : Dict = None
lowercase_ : Union[str, Any] = None
lowercase_ : Optional[int] = None
if self.use_labels:
lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Union[str, Any] ) -> Optional[Any]:
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , use_stable_embedding=A , )
def A ( self : Optional[Any] , A : Tuple , A : str , A : Union[str, Any] , A : Dict , A : Tuple , A : Any , A : Any ) -> List[str]:
lowercase_ : str = OpenLlamaModel(config=A )
model.to(A )
model.eval()
lowercase_ : Any = model(A , attention_mask=A )
lowercase_ : Union[str, Any] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Any , A : Dict , A : int , A : Optional[Any] , A : str , A : Union[str, Any] , A : List[Any] , A : Optional[int] , A : List[Any] , A : Tuple , ) -> int:
lowercase_ : int = True
lowercase_ : Tuple = OpenLlamaModel(A )
model.to(A )
model.eval()
lowercase_ : List[Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
lowercase_ : Union[str, Any] = model(
A , attention_mask=A , encoder_hidden_states=A , )
lowercase_ : Optional[Any] = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , A : Union[str, Any] , A : List[str] , A : str , A : Any , A : Optional[Any] , A : Tuple , A : str , A : int , A : Any , ) -> Optional[int]:
lowercase_ : Dict = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
lowercase_ : Optional[Any] = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Dict , A : Union[str, Any] , A : Union[str, Any] , A : List[Any] , A : Tuple , A : Union[str, Any] , A : int , A : Tuple , A : List[str] , A : str , ) -> Tuple:
lowercase_ : List[Any] = True
lowercase_ : List[str] = True
lowercase_ : Tuple = OpenLlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
lowercase_ : Union[str, Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
lowercase_ : Union[str, Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase_ : str = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase_ : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase_ : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase_ : str = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase_ : Union[str, Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )['''hidden_states'''][0]
lowercase_ : int = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )['''hidden_states'''][0]
# select random slice
lowercase_ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase_ : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) )
def A ( self : Optional[int] ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _A , _A , _A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : int = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE_ : Any = (OpenLlamaForCausalLM,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : Optional[int] = (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Dict = False
SCREAMING_SNAKE_CASE_ : Optional[int] = False
def A ( self : Dict ) -> List[str]:
lowercase_ : Optional[Any] = OpenLlamaModelTester(self )
lowercase_ : Optional[int] = ConfigTester(self , config_class=A , hidden_size=37 )
def A ( self : List[str] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def A ( self : int ) -> Optional[Any]:
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def A ( self : List[Any] ) -> int:
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase_ : List[Any] = type
self.model_tester.create_and_check_model(*A )
def A ( self : Dict ) -> str:
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Any = 3
lowercase_ : Any = input_dict['''input_ids''']
lowercase_ : Tuple = input_ids.ne(1 ).to(A )
lowercase_ : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase_ : Optional[Any] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
lowercase_ : Dict = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : Tuple ) -> Optional[int]:
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Union[str, Any] = 3
lowercase_ : Optional[Any] = '''single_label_classification'''
lowercase_ : str = input_dict['''input_ids''']
lowercase_ : Tuple = input_ids.ne(1 ).to(A )
lowercase_ : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase_ : Optional[int] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
lowercase_ : Dict = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def A ( self : List[str] ) -> Tuple:
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Optional[Any] = 3
lowercase_ : List[Any] = '''multi_label_classification'''
lowercase_ : List[str] = input_dict['''input_ids''']
lowercase_ : str = input_ids.ne(1 ).to(A )
lowercase_ : Optional[Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase_ : List[str] = OpenLlamaForSequenceClassification(A )
model.to(A )
model.eval()
lowercase_ : List[str] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
def A ( self : Union[str, Any] ) -> Dict:
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def A ( self : List[Any] , A : List[str] ) -> int:
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Any = ids_tensor([1, 10] , config.vocab_size )
lowercase_ : Any = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase_ : List[Any] = OpenLlamaModel(A )
original_model.to(A )
original_model.eval()
lowercase_ : Dict = original_model(A ).last_hidden_state
lowercase_ : Optional[Any] = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase_ : List[Any] = {'''type''': scaling_type, '''factor''': 10.0}
lowercase_ : List[str] = OpenLlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
lowercase_ : Dict = scaled_model(A ).last_hidden_state
lowercase_ : int = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1e-5 ) )
| 231 | 0 |
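The parameterized test above exercises rope_scaling = {'type': scaling_type, 'factor': 10.0}. As a rough illustration of the linear variant, the sketch below divides positions by the scaling factor before computing the rotary angles; it is an illustrative stand-in, not the model's internal implementation:

import torch


def rotary_angles(positions: torch.Tensor, dim: int, base: float = 10_000.0, factor: float = 1.0) -> torch.Tensor:
    # Linear RoPE scaling divides positions by `factor`, stretching the usable context window.
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    return torch.outer(positions.float() / factor, inv_freq)


positions = torch.arange(8)
plain = rotary_angles(positions, dim=64)
scaled = rotary_angles(positions, dim=64, factor=10.0)
assert torch.allclose(plain / 10.0, scaled)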
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list that only accepts edge weights of 0 or 1."""

    def __init__(self, size: int) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: a deque stands in for a priority queue because edge
        weights are restricted to 0 and 1."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
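A quick usage sketch for the 0-1 BFS graph above: zero-weight edges go to the front of the deque and unit-weight edges to the back, so the deque behaves like a two-bucket priority queue and the whole search runs in O(V + E).

graph = AdjacencyList(3)
graph.add_edge(0, 1, 0)
graph.add_edge(0, 2, 1)
graph.add_edge(1, 2, 0)
assert graph.get_shortest_path(0, 2) == 0  # 0 -> 1 -> 2 via two zero-weight edges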
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """Initialize with a list of the number of items in each set
        and with rank = 1 for each set."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets using union by rank; return False if they were
        already in the same set."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of a set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
| 188 | 0 |
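A short usage sketch for the disjoint-set class above, showing union by rank and the running maximum set size:

ds = DisjointSet([1, 1, 1])
ds.merge(0, 1)  # equal ranks: the destination root absorbs the source and its rank grows
ds.merge(2, 0)  # 2 joins the merged set through 0's root
assert ds.max_set == 3
assert ds.get_parent(2) == ds.get_parent(0)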
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'num_attention_heads' ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , 'num_encoder_blocks' ) )
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=64 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=[2, 2, 2, 2] , SCREAMING_SNAKE_CASE_=[8, 4, 2, 1] , SCREAMING_SNAKE_CASE_=[16, 32, 64, 128] , SCREAMING_SNAKE_CASE_=[1, 4, 8, 16] , SCREAMING_SNAKE_CASE_=[1, 2, 4, 8] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=None , ) -> str:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = num_encoder_blocks
lowerCamelCase_ = sr_ratios
lowerCamelCase_ = depths
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = downsampling_rates
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = scope
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase( self ) -> int:
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = SegformerModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = SegformerForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
lowerCamelCase_ = 1
lowerCamelCase_ = SegformerForSemanticSegmentation(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertGreater(result.loss , 0.0 )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
{
'feature-extraction': SegformerModel,
'image-classification': SegformerForImageClassification,
'image-segmentation': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = SegformerModelTester(self )
lowerCamelCase_ = SegformerConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*SCREAMING_SNAKE_CASE_ )
@unittest.skip('SegFormer does not use inputs_embeds' )
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCamelCase( self ) -> Optional[int]:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
for model_class in self.all_model_classes:
lowerCamelCase_ = True
lowerCamelCase_ = False
lowerCamelCase_ = True
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.attentions
lowerCamelCase_ = sum(self.model_tester.depths )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase_ = True
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# verify the first attentions (first block, first layer)
lowerCamelCase_ = (self.model_tester.image_size // 4) ** 2
lowerCamelCase_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
lowerCamelCase_ = (self.model_tester.image_size // 32) ** 2
lowerCamelCase_ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
lowerCamelCase_ = len(SCREAMING_SNAKE_CASE_ )
# Check attention is always last and order is fine
lowerCamelCase_ = True
lowerCamelCase_ = True
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(out_len + 1 , len(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# verify the first attentions (first block, first layer)
lowerCamelCase_ = (self.model_tester.image_size // 4) ** 2
lowerCamelCase_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ = outputs.hidden_states
lowerCamelCase_ = self.model_tester.num_encoder_blocks
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
if not self.model_tester.is_training:
return
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
for model_class in self.all_model_classes:
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
continue
lowerCamelCase_ = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
lowerCamelCase_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(**SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
pass
@slow
def UpperCamelCase( self ) -> str:
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = SegformerModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( ) -> Optional[Any]:
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=SCREAMING_SNAKE_CASE_ , align=SCREAMING_SNAKE_CASE_ , do_random_crop=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase_ = encoded_inputs.pixel_values.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@slow
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=SCREAMING_SNAKE_CASE_ , align=SCREAMING_SNAKE_CASE_ , do_random_crop=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase_ = encoded_inputs.pixel_values.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-1 ) )
@slow
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=SCREAMING_SNAKE_CASE_ , align=SCREAMING_SNAKE_CASE_ , do_random_crop=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
lowerCamelCase_ = encoded_inputs.pixel_values.to(SCREAMING_SNAKE_CASE_ )
with torch.no_grad():
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = outputs.logits.detach().cpu()
lowerCamelCase_ = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ , target_sizes=[(500, 300)] )
lowerCamelCase_ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE_ )
| 42 |
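The last integration test above turns (1, num_labels, 128, 128) logits into per-pixel label maps via post_process_semantic_segmentation. A minimal sketch of the underlying step, bilinear upsampling followed by an argmax over the class dimension (the shapes below are illustrative):

import torch
import torch.nn.functional as F

logits = torch.randn(1, 150, 128, 128)  # (batch, num_labels, h, w)
upsampled = F.interpolate(logits, size=(500, 300), mode='bilinear', align_corners=False)
segmentation = upsampled.argmax(dim=1)  # (batch, 500, 300) map of class ids
assert segmentation.shape == (1, 500, 300)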
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=33 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , ) -> int:
'''simple docstring'''
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = EsmModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ = EsmForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = EsmForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = ()
SCREAMING_SNAKE_CASE_ = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = True
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = EsmModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase_ = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = EsmModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase_ = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase_ = create_position_ids_from_input_ids(SCREAMING_SNAKE_CASE_ , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase_ = EsmEmbeddings(config=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.empty(2 , 4 , 30 )
lowerCamelCase_ = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCamelCase_ = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCamelCase_ = embeddings.create_position_ids_from_inputs_embeds(SCREAMING_SNAKE_CASE_ )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase( self ) -> List[Any]:
'''simple docstring'''
pass
@require_torch
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
@slow
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
with torch.no_grad():
lowerCamelCase_ = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCamelCase_ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
lowerCamelCase_ = 33
lowerCamelCase_ = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = torch.tensor(
[[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
@slow
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
lowerCamelCase_ = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
model.eval()
lowerCamelCase_ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase_ = model(SCREAMING_SNAKE_CASE_ )[0]
# compare the actual values for a slice.
lowerCamelCase_ = torch.tensor(
[[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 42 | 1 |
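The embedding tests above check that padding positions keep padding_idx while real tokens count up from padding_idx + 1. A self-contained re-statement of that rule (my own sketch, not the library function itself):

import torch


def position_ids_from_input_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).long()
    incremental = torch.cumsum(mask, dim=1) * mask  # 1..n on real tokens, 0 on padding
    return incremental + padding_idx


ids = torch.tensor([[12, 31, 13, 1]])  # 1 plays the padding index here
expected = torch.tensor([[2, 3, 4, 1]])
assert torch.equal(position_ids_from_input_ids(ids, padding_idx=1), expected)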
"""simple docstring"""
import os
import pytest
from attr import dataclass
_SCREAMING_SNAKE_CASE = """us-east-1""" # defaults region
@dataclass
class __magic_name__ :
_SCREAMING_SNAKE_CASE : str
_SCREAMING_SNAKE_CASE : Optional[Any] = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 500,
'save_steps': 5500,
}
_SCREAMING_SNAKE_CASE : Dict = {**hyperparameters, 'max_steps': 1000}
@property
def lowerCAmelCase ( self : Optional[Any] ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def lowerCAmelCase ( self : str ):
return F'''{self.framework}-transfromers-test'''
@property
def lowerCAmelCase ( self : List[str] ):
return F'''./tests/sagemaker/scripts/{self.framework}'''
@property
def lowerCAmelCase ( self : Optional[Any] ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def __UpperCamelCase ( SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
__snake_case = SageMakerTestEnvironment(framework=request.cls.framework )
| 614 |
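Each metric definition above pairs a name with a regex that SageMaker applies to the training logs. A quick sketch of what one of those patterns captures from a plausible log line (the log line itself is an illustrative assumption):

import re

pattern = r"train_runtime.*=\D*(.*?)$"
line = "train_runtime = 123.45"
match = re.search(pattern, line)
assert match is not None
assert match.group(1) == "123.45"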
"""simple docstring"""
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum possible sum over all non-empty subsequences."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
    n = int(input("""Enter number of elements : """).strip())
    array = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array))
| 614 | 1 |
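Two quick checks of the recurrence above: because subsequences need not be contiguous, the optimum is the sum of all positive entries, or the largest single element when everything is negative.

assert max_subsequence_sum([2, -5, 3]) == 5     # picks 2 and 3
assert max_subsequence_sum([-4, -2, -7]) == -2  # best single element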
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 109 |
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __a ( _snake_case ):
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase ,"""hidden_sizes""" ) )
self.parent.assertTrue(hasattr(lowerCamelCase ,"""num_attention_heads""" ) )
self.parent.assertTrue(hasattr(lowerCamelCase ,"""num_encoder_blocks""" ) )
class __a :
def __init__( self : Optional[Any] ,lowerCamelCase : List[str] ,lowerCamelCase : List[Any]=13 ,lowerCamelCase : Union[str, Any]=64 ,lowerCamelCase : Dict=3 ,lowerCamelCase : Optional[Any]=4 ,lowerCamelCase : Optional[Any]=[2, 2, 2, 2] ,lowerCamelCase : Tuple=[8, 4, 2, 1] ,lowerCamelCase : Dict=[16, 32, 64, 128] ,lowerCamelCase : Tuple=[1, 4, 8, 16] ,lowerCamelCase : str=[1, 2, 4, 8] ,lowerCamelCase : str=True ,lowerCamelCase : Union[str, Any]=True ,lowerCamelCase : Optional[Any]="gelu" ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : List[str]=0.1 ,lowerCamelCase : Optional[Any]=0.02 ,lowerCamelCase : int=3 ,lowerCamelCase : List[str]=None ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = num_encoder_blocks
__SCREAMING_SNAKE_CASE = sr_ratios
__SCREAMING_SNAKE_CASE = depths
__SCREAMING_SNAKE_CASE = hidden_sizes
__SCREAMING_SNAKE_CASE = downsampling_rates
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = scope
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size ,num_channels=self.num_channels ,num_encoder_blocks=self.num_encoder_blocks ,depths=self.depths ,hidden_sizes=self.hidden_sizes ,num_attention_heads=self.num_attention_heads ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : int ,lowerCamelCase : List[str] ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SegformerModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
__SCREAMING_SNAKE_CASE = __SCREAMING_SNAKE_CASE = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : int ,lowerCamelCase : List[str] ,lowerCamelCase : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = SegformerForSemanticSegmentation(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase ,labels=lowerCamelCase )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss ,0.0 )
def UpperCAmelCase__ ( self : str ,lowerCamelCase : Tuple ,lowerCamelCase : Optional[Any] ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = SegformerForSemanticSegmentation(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE = torch.randint(0 ,1 ,(self.batch_size, self.image_size, self.image_size) ).to(lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase ,labels=lowerCamelCase )
self.parent.assertGreater(result.loss ,0.0 )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
__UpperCamelCase : Dict = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase : Union[str, Any] = (
{
'feature-extraction': SegformerModel,
'image-classification': SegformerForImageClassification,
'image-segmentation': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : List[Any] = False
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : Optional[int] = False
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SegformerModelTester(self )
__SCREAMING_SNAKE_CASE = SegformerConfigTester(self ,config_class=lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*lowerCamelCase )
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCamelCase ,lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = outputs.attentions
__SCREAMING_SNAKE_CASE = sum(self.model_tester.depths )
self.assertEqual(len(lowerCamelCase ) ,lowerCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCamelCase ,lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(lowerCamelCase ) ,lowerCamelCase )
# verify the first attentions (first block, first layer)
__SCREAMING_SNAKE_CASE = (self.model_tester.image_size // 4) ** 2
__SCREAMING_SNAKE_CASE = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,)
# verify the last attentions (last block, last layer)
__SCREAMING_SNAKE_CASE = (self.model_tester.image_size // 32) ** 2
__SCREAMING_SNAKE_CASE = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) ,[self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] ,)
__SCREAMING_SNAKE_CASE = len(lowerCamelCase )
# Check attention is always last and order is fine
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCamelCase ,lowerCamelCase ) )
self.assertEqual(out_len + 1 ,len(lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = outputs.attentions
self.assertEqual(len(lowerCamelCase ) ,lowerCamelCase )
# verify the first attentions (first block, first layer)
__SCREAMING_SNAKE_CASE = (self.model_tester.image_size // 4) ** 2
__SCREAMING_SNAKE_CASE = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,)
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase : Tuple ,lowerCamelCase : List[Any] ,lowerCamelCase : Dict ):
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCamelCase ,lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = outputs.hidden_states
__SCREAMING_SNAKE_CASE = self.model_tester.num_encoder_blocks
self.assertEqual(len(lowerCamelCase ) ,lowerCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) ,[
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] ,)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase ):
continue
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.train()
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase ,return_labels=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(**lowerCamelCase ).loss
loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = SegformerModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def __magic_name__ ( ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SegformerImageProcessor(
image_scale=(512, 512) ,keep_ratio=lowerCamelCase ,align=lowerCamelCase ,do_random_crop=lowerCamelCase )
__SCREAMING_SNAKE_CASE = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
lowerCamelCase )
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=lowerCamelCase ,return_tensors="""pt""" )
__SCREAMING_SNAKE_CASE = encoded_inputs.pixel_values.to(lowerCamelCase )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
__SCREAMING_SNAKE_CASE = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] ,lowerCamelCase ,atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SegformerImageProcessor(
image_scale=(512, 512) ,keep_ratio=lowerCamelCase ,align=lowerCamelCase ,do_random_crop=lowerCamelCase )
__SCREAMING_SNAKE_CASE = SegformerForSemanticSegmentation.from_pretrained(
"""nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(lowerCamelCase )
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=lowerCamelCase ,return_tensors="""pt""" )
__SCREAMING_SNAKE_CASE = encoded_inputs.pixel_values.to(lowerCamelCase )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
__SCREAMING_SNAKE_CASE = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] ,lowerCamelCase ,atol=1E-1 ) )
@slow
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SegformerImageProcessor(
image_scale=(512, 512) ,keep_ratio=lowerCamelCase ,align=lowerCamelCase ,do_random_crop=lowerCamelCase )
__SCREAMING_SNAKE_CASE = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
lowerCamelCase )
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=lowerCamelCase ,return_tensors="""pt""" )
__SCREAMING_SNAKE_CASE = encoded_inputs.pixel_values.to(lowerCamelCase )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
__SCREAMING_SNAKE_CASE = outputs.logits.detach().cpu()
__SCREAMING_SNAKE_CASE = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase ,target_sizes=[(500, 300)] )
__SCREAMING_SNAKE_CASE = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase )
__SCREAMING_SNAKE_CASE = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape ,lowerCamelCase )
| 109 | 1 |
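# Usage sketch: the attention-shape assertions in the tests above encode
# SegFormer's efficient-attention geometry. With the tester defaults
# (image_size=64, sr_ratios=[8, 4, 2, 1]), plain arithmetic reproduces the
# expected query and (spatially reduced) key/value sequence lengths.
image_size = 64
sr_ratios = [8, 4, 2, 1]

# First encoder block: tokens live on an (H/4, W/4) grid.
assert (image_size // 4) ** 2 == 256  # query sequence length
assert (image_size // (4 * sr_ratios[0])) ** 2 == 4  # reduced key/value length

# Last encoder block: an (H/32, W/32) grid with sr_ratio 1 (no reduction).
assert (image_size // 32) ** 2 == 4
assert (image_size // (32 * sr_ratios[-1])) ** 2 == 4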
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely sequence of hidden states (Viterbi algorithm)."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
    testmod()
 | 57 |
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely sequence of hidden states (Viterbi algorithm)."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
    testmod()
 | 57 | 1 |
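# Usage sketch for the viterbi decoder above, on the classic healthy/fever HMM.
observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
start_p = {"Healthy": 0.6, "Fever": 0.4}
trans_p = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emit_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}

assert viterbi(observations, states, start_p, trans_p, emit_p) == ["Healthy", "Healthy", "Fever"]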
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
| 608 |
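# Usage sketch for the measurement helpers above. The names start_measure,
# end_measure, and log_measures are as reconstructed here (they mirror the
# Accelerate benchmarking utilities this file follows).
import torch

start = start_measure()  # snapshot time, CPU RSS, per-GPU allocated bytes
x = torch.randn(1024, 1024)
y = x @ x  # the workload being profiled
measures = end_measure(start)  # deltas in MiB, plus CPU/GPU peaks
log_measures(measures, "matmul benchmark")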
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
A__ : Dict = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image classification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING

FlaxAutoModel = auto_class_update(FlaxAutoModel)

class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING

FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")

class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING

FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")

class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING

FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")

class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)

class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)

class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING

FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")

class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)

class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING

FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")

class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING

FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)

class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)

class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING

FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")

class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING

FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 233 | 0 |
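# Usage sketch: the auto classes above dispatch on the checkpoint's config,
# so the right Flax model class is resolved lazily from the mappings.
from transformers import FlaxAutoModel, FlaxAutoModelForSeq2SeqLM

model = FlaxAutoModel.from_pretrained("bert-base-cased")  # -> FlaxBertModel
seq2seq = FlaxAutoModelForSeq2SeqLM.from_pretrained("t5-small")  # -> FlaxT5ForConditionalGeneration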
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )


@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , SCREAMING_SNAKE_CASE )
# Set seed
set_seed(training_args.seed )
try:
UpperCamelCase__ = processors[data_args.task_name]()
UpperCamelCase__ = processor.get_labels()
UpperCamelCase__ = len(SCREAMING_SNAKE_CASE )
except KeyError:
raise ValueError('Task not found: %s' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=SCREAMING_SNAKE_CASE , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
UpperCamelCase__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
UpperCamelCase__ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , )
# Get datasets
UpperCamelCase__ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
UpperCamelCase__ = (
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=SCREAMING_SNAKE_CASE , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}
# Data collator
UpperCamelCase__ = DataCollatorWithPadding(SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
UpperCamelCase__ = Trainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , compute_metrics=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCamelCase__ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
UpperCamelCase__ = trainer.evaluate()
UpperCamelCase__ = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_master():
with open(SCREAMING_SNAKE_CASE , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
writer.write('%s = %s\n' % (key, value) )
results.update(SCREAMING_SNAKE_CASE )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 704 |
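# Usage sketch for the simple_accuracy metric defined above:
import numpy as np

preds = np.array([0, 1, 1, 0])
labels = np.array([0, 1, 0, 0])
assert simple_accuracy(preds, labels) == 0.75  # 3 of 4 predictions match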
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer):
a : Dict =VOCAB_FILES_NAMES
a : str =PRETRAINED_VOCAB_FILES_MAP
a : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] =["""input_ids""", """attention_mask"""]
def __init__( self , snake_case_ , snake_case_="</s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_=100 , snake_case_=None , snake_case_ = None , snake_case_=True , **snake_case_ , ) -> None:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
UpperCamelCase__ = [F'<extra_id_{i}>' for i in range(snake_case_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCamelCase__ = len(set(filter(lambda snake_case_ : bool('extra_id' in str(snake_case_ ) ) , snake_case_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
if legacy:
logger.warning_once(
F'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
' read the related pull request available at https://github.com/huggingface/transformers/pull/24565' )
UpperCamelCase__ = legacy
UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , extra_ids=snake_case_ , additional_special_tokens=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , legacy=snake_case_ , **snake_case_ , )
UpperCamelCase__ = vocab_file
UpperCamelCase__ = extra_ids
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case_ )
@staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
F' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
F' {pretrained_model_name_or_path} automatically truncating your input to'
F' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
F' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , snake_case_ , )
return max_model_length
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
return self.sp_model.get_piece_size() + self._extra_ids
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
UpperCamelCase__ = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None , snake_case_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(snake_case_ )) + [1]
return ([0] * len(snake_case_ )) + [1] + ([0] * len(snake_case_ )) + [1]
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
return list(
set(filter(lambda snake_case_ : bool(re.search(r'<extra_id_\d+>' , snake_case_ ) ) is not None , self.additional_special_tokens ) ) )
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return [self._convert_token_to_id(snake_case_ ) for token in self.get_sentinel_tokens()]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[int]:
if len(snake_case_ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
' eos tokens being added.' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
UpperCamelCase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> List[int]:
UpperCamelCase__ = self._add_eos_if_not_present(snake_case_ )
if token_ids_a is None:
return token_ids_a
else:
UpperCamelCase__ = self._add_eos_if_not_present(snake_case_ )
return token_ids_a + token_ids_a
def __getstate__( self ) -> str:
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = None
return state
def __setstate__( self , snake_case_ ) -> Any:
UpperCamelCase__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase__ = {}
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[str]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
UpperCamelCase__ = SPIECE_UNDERLINE + text.replace(snake_case_ , ' ' )
return super().tokenize(snake_case_ , **snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , **snake_case_ ) -> List[Any]:
if not self.legacy:
UpperCamelCase__ = text.startswith(snake_case_ )
if is_first:
UpperCamelCase__ = text[1:]
UpperCamelCase__ = self.sp_model.encode(snake_case_ , out_type=snake_case_ )
if not self.legacy and not is_first and not text.startswith(' ' ) and tokens[0].startswith(snake_case_ ):
UpperCamelCase__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
if token.startswith('<extra_id_' ):
UpperCamelCase__ = re.match(r'<extra_id_(\d+)>' , snake_case_ )
UpperCamelCase__ = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(snake_case_ )
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> Optional[int]:
if index < self.sp_model.get_piece_size():
UpperCamelCase__ = self.sp_model.IdToPiece(snake_case_ )
else:
UpperCamelCase__ = F'<extra_id_{self.vocab_size - 1 - index}>'
return token
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ ) -> List[str]:
UpperCamelCase__ = []
UpperCamelCase__ = ''
UpperCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case_ ) + token
UpperCamelCase__ = True
UpperCamelCase__ = []
else:
current_sub_tokens.append(snake_case_ )
UpperCamelCase__ = False
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def SCREAMING_SNAKE_CASE__ ( self , snake_case_ , snake_case_ = None ) -> Tuple[str]:
if not os.path.isdir(snake_case_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCamelCase__ = os.path.join(
snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , 'wb' ) as fi:
UpperCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
| 20 | 0 |
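# Usage sketch: the <extra_id_N> sentinel handling above maps sentinel tokens
# to the top of the vocabulary (id = vocab_size - N - 1).
from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("t5-small")
assert tokenizer.convert_tokens_to_ids("<extra_id_0>") == tokenizer.vocab_size - 1
assert tokenizer.convert_tokens_to_ids("<extra_id_99>") == tokenizer.vocab_size - 100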
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
 | 248 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]
def __init__( self : str , UpperCamelCase__ : CLIPConfig ):
'''simple docstring'''
super().__init__(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = CLIPVisionModel(config.vision_config )
SCREAMING_SNAKE_CASE : Tuple = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.ones(17 ) , requires_grad=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.ones(3 ) , requires_grad=UpperCamelCase__ )
@torch.no_grad()
def __A ( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.vision_model(UpperCamelCase__ )[1] # pooled_output
SCREAMING_SNAKE_CASE : Any = self.visual_projection(UpperCamelCase__ )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE : str = cosine_distance(UpperCamelCase__ , self.special_care_embeds ).cpu().float().numpy()
SCREAMING_SNAKE_CASE : Optional[int] = cosine_distance(UpperCamelCase__ , self.concept_embeds ).cpu().float().numpy()
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : str = image_embeds.shape[0]
for i in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : Dict = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
SCREAMING_SNAKE_CASE : Optional[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
SCREAMING_SNAKE_CASE : Dict = special_cos_dist[i][concept_idx]
SCREAMING_SNAKE_CASE : Union[str, Any] = self.special_care_embeds_weights[concept_idx].item()
SCREAMING_SNAKE_CASE : Optional[int] = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
SCREAMING_SNAKE_CASE : Optional[Any] = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
SCREAMING_SNAKE_CASE : Optional[int] = cos_dist[i][concept_idx]
SCREAMING_SNAKE_CASE : List[str] = self.concept_embeds_weights[concept_idx].item()
SCREAMING_SNAKE_CASE : Dict = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(UpperCamelCase__ )
result.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = [len(res['''bad_concepts'''] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def __A ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.vision_model(UpperCamelCase__ )[1] # pooled_output
SCREAMING_SNAKE_CASE : Union[str, Any] = self.visual_projection(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = cosine_distance(UpperCamelCase__ , self.special_care_embeds )
SCREAMING_SNAKE_CASE : Any = cosine_distance(UpperCamelCase__ , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
SCREAMING_SNAKE_CASE : int = 0.0
SCREAMING_SNAKE_CASE : Union[str, Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
SCREAMING_SNAKE_CASE : Any = torch.any(special_scores > 0 , dim=1 )
SCREAMING_SNAKE_CASE : Any = special_care * 0.01
SCREAMING_SNAKE_CASE : Optional[int] = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
SCREAMING_SNAKE_CASE : List[str] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
SCREAMING_SNAKE_CASE : Tuple = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
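# Self-contained sketch of the scoring core above: cosine similarity between
# L2-normalized embeddings, then a per-concept threshold test. Shapes and the
# 0.01 special-care adjustment mirror the module; the embeddings and thresholds
# here are random stand-ins, not the shipped weights.
import torch
import torch.nn.functional as F

image_embeds = torch.randn(2, 768)      # batch of projected image embeddings
concept_embeds = torch.randn(17, 768)   # fixed per-concept embeddings
thresholds = torch.full((17,), 0.2)     # per-concept trigger thresholds

sim = torch.mm(F.normalize(image_embeds), F.normalize(concept_embeds).t())
concept_scores = sim - thresholds       # positive score => concept triggered
has_nsfw = torch.any(concept_scores > 0, dim=1)
print(has_nsfw.tolist())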
| 248 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_speecht5"""] = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_speecht5"""] = [
"""SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SpeechT5ForSpeechToText""",
"""SpeechT5ForSpeechToSpeech""",
"""SpeechT5ForTextToSpeech""",
"""SpeechT5Model""",
"""SpeechT5PreTrainedModel""",
"""SpeechT5HifiGan""",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
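# The block above is the standard transformers lazy-import shim: heavy submodules
# load only when an attribute is first touched. A toy equivalent using PEP 562
# module-level __getattr__, independent of transformers internals:
import importlib

_lazy_symbols = {"math": ["sqrt"]}  # module name -> symbols it provides

def __getattr__(name):  # invoked only for attributes not found normally
    for module_name, symbols in _lazy_symbols.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(name)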
| 520 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester( unittest.TestCase ):
    '''simple docstring'''

    def __init__( self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict( self ):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''

    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp( self ):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )

    @property
    def image_processor_dict( self ):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor, """image_mean""" ) )
        self.assertTrue(hasattr(image_processor, """image_std""" ) )
        self.assertTrue(hasattr(image_processor, """do_normalize""" ) )
        self.assertTrue(hasattr(image_processor, """do_resize""" ) )
        self.assertTrue(hasattr(image_processor, """size""" ) )

    def test_batch_feature( self ):
        pass

    def test_call_pil( self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ), )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ), )

    def test_call_numpy( self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ), )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ), )

    def test_call_pytorch( self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ), )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ), )
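# Quick usage sketch of the processor exercised above: resize plus normalize a
# random channels-first image down to the configured size (the 18x18 size dict
# matches the tester defaults; the values are otherwise illustrative).
import numpy as np
from transformers import ViTImageProcessor

processor = ViTImageProcessor(do_resize=True, size={"height": 18, "width": 18})
image = np.random.randint(0, 256, (3, 30, 30), dtype=np.uint8)
pixel_values = processor(image, return_tensors="pt").pixel_values
assert pixel_values.shape == (1, 3, 18, 18)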
| 520 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _A :
def __init__( self : Any , __magic_name__ : Dict , __magic_name__ : Optional[Any]=2 , __magic_name__ : List[Any]=True , __magic_name__ : Optional[Any]=False , __magic_name__ : Optional[int]=10 , __magic_name__ : Optional[int]=3 , __magic_name__ : Optional[int]=32 * 4 , __magic_name__ : int=32 * 6 , __magic_name__ : List[Any]=4 , __magic_name__ : List[Any]=32 , ) -> Dict:
"""simple docstring"""
__snake_case : str = parent
__snake_case : int = batch_size
__snake_case : int = is_training
__snake_case : Tuple = use_auxiliary_loss
__snake_case : List[str] = num_queries
__snake_case : str = num_channels
__snake_case : Dict = min_size
__snake_case : int = max_size
__snake_case : List[Any] = num_labels
__snake_case : Union[str, Any] = mask_feature_size
def lowercase__ ( self : str ) -> Dict:
"""simple docstring"""
__snake_case : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__magic_name__ )
__snake_case : Dict = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__magic_name__ )
__snake_case : Optional[Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__magic_name__ ) > 0.5
).float()
__snake_case : Union[str, Any] = (torch.rand((self.batch_size, self.num_labels) , device=__magic_name__ ) > 0.5).long()
__snake_case : Tuple = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Any = self.prepare_config_and_inputs()
__snake_case : str = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def lowercase__ ( self : str , __magic_name__ : int , __magic_name__ : int ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Any = output.encoder_hidden_states
__snake_case : List[str] = output.pixel_decoder_hidden_states
__snake_case : str = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__magic_name__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__magic_name__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__magic_name__ ) , config.decoder_config.decoder_layers )
def lowercase__ ( self : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Tuple=False ) -> int:
"""simple docstring"""
with torch.no_grad():
__snake_case : str = MaskFormerModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : List[Any] = model(pixel_values=__magic_name__ , pixel_mask=__magic_name__ )
__snake_case : int = model(__magic_name__ , output_hidden_states=__magic_name__ )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__magic_name__ , __magic_name__ )
def lowercase__ ( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__snake_case : Optional[Any] = MaskFormerForInstanceSegmentation(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
def comm_check_on_output(__magic_name__ : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__snake_case : Tuple = model(pixel_values=__magic_name__ , pixel_mask=__magic_name__ )
__snake_case : List[Any] = model(__magic_name__ )
comm_check_on_output(__magic_name__ )
__snake_case : List[Any] = model(
pixel_values=__magic_name__ , pixel_mask=__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ )
comm_check_on_output(__magic_name__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _A ( __lowercase , __lowercase , unittest.TestCase ):
lowercase__: str = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
lowercase__: Tuple = (
{'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
lowercase__: Optional[Any] = False
lowercase__: Union[str, Any] = False
lowercase__: Optional[int] = False
lowercase__: Dict = False
def lowercase__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__snake_case : Tuple = MaskFormerModelTester(self )
__snake_case : Optional[int] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )
def lowercase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__magic_name__ , **__magic_name__ , output_hidden_states=__magic_name__ )
def lowercase__ ( self : Any ) -> str:
"""simple docstring"""
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__magic_name__ )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def lowercase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def lowercase__ ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def lowercase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[str] = model_class(__magic_name__ )
__snake_case : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Union[str, Any] = [*signature.parameters.keys()]
__snake_case : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
@slow
def lowercase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
__snake_case : str = MaskFormerModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
__snake_case : Any = (self.model_tester.min_size,) * 2
__snake_case : List[Any] = {
"""pixel_values""": torch.randn((2, 3, *size) , device=__magic_name__ ),
"""mask_labels""": torch.randn((2, 10, *size) , device=__magic_name__ ),
"""class_labels""": torch.zeros(2 , 10 , device=__magic_name__ ).long(),
}
__snake_case : str = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__magic_name__ )
__snake_case : Optional[Any] = model(**__magic_name__ )
self.assertTrue(outputs.loss is not None )
def lowercase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__magic_name__ , **__magic_name__ , output_hidden_states=__magic_name__ )
def lowercase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : List[Any] = model_class(__magic_name__ ).to(__magic_name__ )
__snake_case : Any = model(**__magic_name__ , output_attentions=__magic_name__ )
self.assertTrue(outputs.attentions is not None )
def lowercase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
__snake_case : Dict = self.all_model_classes[1]
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs()
__snake_case : Union[str, Any] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
__snake_case : Dict = model(__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ ).loss
loss.backward()
def lowercase__ ( self : Tuple ) -> str:
"""simple docstring"""
__snake_case : Tuple = self.all_model_classes[1]
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
__snake_case : Any = True
__snake_case : str = True
__snake_case : List[Any] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.train()
__snake_case : str = model(__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ )
__snake_case : Dict = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__snake_case : Union[str, Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
__snake_case : Dict = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__snake_case : Optional[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__magic_name__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__UpperCamelCase = 1E-4
def _a ( ) -> Optional[Any]:
"""simple docstring"""
__snake_case : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class _A ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : int ) -> List[Any]:
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def lowercase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__snake_case : List[str] = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__magic_name__ )
__snake_case : List[str] = self.default_image_processor
__snake_case : Optional[int] = prepare_img()
__snake_case : Optional[int] = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
__snake_case : str = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__magic_name__ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
__snake_case : Dict = model(**__magic_name__ )
__snake_case : Tuple = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
__snake_case : Optional[Any] = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
__snake_case : Dict = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(__magic_name__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def lowercase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__magic_name__ )
.eval()
)
__snake_case : List[Any] = self.default_image_processor
__snake_case : List[str] = prepare_img()
__snake_case : Tuple = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
__snake_case : Optional[int] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__magic_name__ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
__snake_case : int = model(**__magic_name__ )
# masks_queries_logits
__snake_case : str = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__snake_case : Optional[int] = [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
__snake_case : Optional[Any] = torch.tensor(__magic_name__ ).to(__magic_name__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
# class_queries_logits
__snake_case : Optional[Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__snake_case : Optional[int] = torch.tensor(
[
[1.6_512E00, -5.2_572E00, -3.3_519E00],
[3.6_169E-02, -5.9_025E00, -2.9_313E00],
[1.0_766E-04, -7.7_630E00, -5.1_263E00],
] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def lowercase__ ( self : Any ) -> Any:
"""simple docstring"""
__snake_case : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(__magic_name__ )
.eval()
)
__snake_case : str = self.default_image_processor
__snake_case : Union[str, Any] = prepare_img()
__snake_case : List[str] = image_processor(__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ )
__snake_case : Dict = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__magic_name__ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
__snake_case : str = model(**__magic_name__ )
# masks_queries_logits
__snake_case : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
__snake_case : List[Any] = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
__snake_case : Union[str, Any] = torch.tensor(__magic_name__ ).to(__magic_name__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
# class_queries_logits
__snake_case : Union[str, Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
__snake_case : Dict = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : List[str] = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(__magic_name__ )
.eval()
)
__snake_case : List[str] = self.default_image_processor
__snake_case : str = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="""pt""" , )
__snake_case : List[str] = inputs["""pixel_values"""].to(__magic_name__ )
__snake_case : Optional[Any] = [el.to(__magic_name__ ) for el in inputs["""mask_labels"""]]
__snake_case : Dict = [el.to(__magic_name__ ) for el in inputs["""class_labels"""]]
with torch.no_grad():
__snake_case : Any = model(**__magic_name__ )
self.assertTrue(outputs.loss is not None )
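# Hedged end-to-end sketch matching the integration tests above: run the small
# COCO checkpoint and post-process to a semantic map. Needs a network download
# and a transformers version that ships `post_process_semantic_segmentation`.
import torch
from PIL import Image
from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco").eval()
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(semantic_map.shape)  # (height, width) tensor of per-pixel class ids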
| 26 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 374 | 0 |
"""simple docstring"""
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
_snake_case = "Usage of script: script_name <size_of_canvas:int>"
_snake_case = [0] * 1_0_0 + [1] * 1_0
random.shuffle(choice)
def __snake_case ( SCREAMING_SNAKE_CASE: Union[str, Any] ):
"""simple docstring"""
_lowerCAmelCase = [[False for i in range(__lowerCAmelCase )] for j in range(__lowerCAmelCase )]
return canvas
def __snake_case ( SCREAMING_SNAKE_CASE: Optional[Any] ):
"""simple docstring"""
for i, row in enumerate(__lowerCAmelCase ):
for j, _ in enumerate(__lowerCAmelCase ):
_lowerCAmelCase = bool(random.getrandbits(1 ) )
def __snake_case ( SCREAMING_SNAKE_CASE: Any ):
"""simple docstring"""
_lowerCAmelCase = np.array(__lowerCAmelCase )
_lowerCAmelCase = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__lowerCAmelCase ):
for c, pt in enumerate(__lowerCAmelCase ):
_lowerCAmelCase = __judge_point(
__lowerCAmelCase , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
_lowerCAmelCase = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
_lowerCAmelCase = current_canvas.tolist()
return return_canvas
def __snake_case ( SCREAMING_SNAKE_CASE: int , SCREAMING_SNAKE_CASE: Tuple ):
"""simple docstring"""
_lowerCAmelCase = 0
_lowerCAmelCase = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
_lowerCAmelCase = pt
if pt:
if alive < 2:
_lowerCAmelCase = False
elif alive == 2 or alive == 3:
_lowerCAmelCase = True
elif alive > 3:
_lowerCAmelCase = False
else:
if alive == 3:
_lowerCAmelCase = True
return state
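# The B3/S23 rule in isolation, handy for cross-checking __judge_point: a live
# cell survives with 2 or 3 live neighbours, a dead cell is born with exactly 3.
def next_state(alive_neighbours, is_alive):
    if is_alive:
        return alive_neighbours in (2, 3)
    return alive_neighbours == 3

assert next_state(3, False) is True   # birth
assert next_state(1, True) is False   # underpopulation
assert next_state(4, True) is False   # overcrowding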
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
_snake_case = int(sys.argv[1])
# main working structure of this module.
_snake_case = create_canvas(canvas_size)
seed(c)
_snake_case = plt.subplots()
fig.show()
_snake_case = ListedColormap(['''w''', '''k'''])
try:
while True:
_snake_case = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 712 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str
SCREAMING_SNAKE_CASE_: List[str]
SCREAMING_SNAKE_CASE_: Optional[List[str]]
@dataclass
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[int]
SCREAMING_SNAKE_CASE_: List[int]
SCREAMING_SNAKE_CASE_: Optional[List[int]] = None
SCREAMING_SNAKE_CASE_: Optional[List[int]] = None
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] = "train"
SCREAMING_SNAKE_CASE_: Tuple = "dev"
SCREAMING_SNAKE_CASE_: List[str] = "test"
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
@staticmethod
def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
raise NotImplementedError
@staticmethod
def __lowerCamelCase ( UpperCAmelCase_ : str ) -> List[str]:
"""simple docstring"""
raise NotImplementedError
@staticmethod
def __lowerCamelCase ( UpperCAmelCase_ : List[InputExample] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Optional[int]="[CLS]" , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : str="[SEP]" , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : List[Any]=-100 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : List[Any]=True , ) -> List[InputFeatures]:
"""simple docstring"""
_lowerCAmelCase = {label: i for i, label in enumerate(UpperCAmelCase_ )}
_lowerCAmelCase = []
for ex_index, example in enumerate(UpperCAmelCase_ ):
if ex_index % 10_000 == 0:
logger.info('Writing example %d of %d' , UpperCAmelCase_ , len(UpperCAmelCase_ ) )
_lowerCAmelCase = []
_lowerCAmelCase = []
for word, label in zip(example.words , example.labels ):
_lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase_ )
            # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(UpperCAmelCase_ ) > 0:
tokens.extend(UpperCAmelCase_ )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(UpperCAmelCase_ ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
_lowerCAmelCase = tokenizer.num_special_tokens_to_add()
if len(UpperCAmelCase_ ) > max_seq_length - special_tokens_count:
_lowerCAmelCase = tokens[: (max_seq_length - special_tokens_count)]
_lowerCAmelCase = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
_lowerCAmelCase = [sequence_a_segment_id] * len(UpperCAmelCase_ )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
_lowerCAmelCase = [cls_token] + tokens
_lowerCAmelCase = [pad_token_label_id] + label_ids
_lowerCAmelCase = [cls_token_segment_id] + segment_ids
_lowerCAmelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
_lowerCAmelCase = [1 if mask_padding_with_zero else 0] * len(UpperCAmelCase_ )
# Zero-pad up to the sequence length.
_lowerCAmelCase = max_seq_length - len(UpperCAmelCase_ )
if pad_on_left:
_lowerCAmelCase = ([pad_token] * padding_length) + input_ids
_lowerCAmelCase = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
_lowerCAmelCase = ([pad_token_segment_id] * padding_length) + segment_ids
_lowerCAmelCase = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(UpperCAmelCase_ ) == max_seq_length
assert len(UpperCAmelCase_ ) == max_seq_length
assert len(UpperCAmelCase_ ) == max_seq_length
assert len(UpperCAmelCase_ ) == max_seq_length
if ex_index < 5:
logger.info('*** Example ***' )
logger.info('guid: %s' , example.guid )
logger.info('tokens: %s' , ' '.join([str(UpperCAmelCase_ ) for x in tokens] ) )
logger.info('input_ids: %s' , ' '.join([str(UpperCAmelCase_ ) for x in input_ids] ) )
logger.info('input_mask: %s' , ' '.join([str(UpperCAmelCase_ ) for x in input_mask] ) )
logger.info('segment_ids: %s' , ' '.join([str(UpperCAmelCase_ ) for x in segment_ids] ) )
logger.info('label_ids: %s' , ' '.join([str(UpperCAmelCase_ ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
_lowerCAmelCase = None
features.append(
InputFeatures(
input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , label_ids=UpperCAmelCase_ ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[InputFeatures]
SCREAMING_SNAKE_CASE_: int = nn.CrossEntropyLoss().ignore_index
def __init__( self : List[Any] , UpperCAmelCase_ : TokenClassificationTask , UpperCAmelCase_ : str , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Split = Split.train , ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase = os.path.join(
UpperCAmelCase_ , 'cached_{}_{}_{}'.format(mode.value , tokenizer.__class__.__name__ , str(UpperCAmelCase_ ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCAmelCase = cached_features_file + '.lock'
with FileLock(UpperCAmelCase_ ):
if os.path.exists(UpperCAmelCase_ ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
_lowerCAmelCase = torch.load(UpperCAmelCase_ )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
_lowerCAmelCase = token_classification_task.read_examples_from_file(UpperCAmelCase_ , UpperCAmelCase_ )
# TODO clean up all this to leverage built-in features of tokenizers
_lowerCAmelCase = token_classification_task.convert_examples_to_features(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=UpperCAmelCase_ , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(F"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , UpperCAmelCase_ )
def __len__( self : Dict ) -> int:
"""simple docstring"""
return len(self.features )
def __getitem__( self : List[str] , UpperCAmelCase_ : List[Any] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
if is_tf_available():
import tensorflow as tf
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[InputFeatures]
SCREAMING_SNAKE_CASE_: int = -1_0_0
def __init__( self : Tuple , UpperCAmelCase_ : TokenClassificationTask , UpperCAmelCase_ : str , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Split = Split.train , ) -> Dict:
"""simple docstring"""
_lowerCAmelCase = token_classification_task.read_examples_from_file(UpperCAmelCase_ , UpperCAmelCase_ )
# TODO clean up all this to leverage built-in features of tokenizers
_lowerCAmelCase = token_classification_task.convert_examples_to_features(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=UpperCAmelCase_ , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
_lowerCAmelCase = tf.data.Dataset.from_generator(
UpperCAmelCase_ , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa}, tf.intaa) , (
{'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
_lowerCAmelCase = tf.data.Dataset.from_generator(
UpperCAmelCase_ , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa}, tf.intaa) , (
{
'input_ids': tf.TensorShape([None] ),
'attention_mask': tf.TensorShape([None] ),
'token_type_ids': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def __lowerCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
_lowerCAmelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : int , UpperCAmelCase_ : List[Any] ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
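# Minimal sketch of the label-alignment rule implemented above: the first subword
# of a word keeps the real label id, the rest get the ignore index (-100 matches
# PyTorch's CrossEntropyLoss default). The toy "tokenizer" just splits each word.
pad_token_label_id = -100
label_map = {"O": 0, "B-LOC": 1}

def align_labels(words, labels, tokenize):
    tokens, label_ids = [], []
    for word, label in zip(words, labels):
        subwords = tokenize(word)
        if subwords:  # some tokenizers return [] for stray whitespace
            tokens.extend(subwords)
            label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(subwords) - 1))
    return tokens, label_ids

tokens, ids = align_labels(["Berlin", "calling"], ["B-LOC", "O"], lambda w: [w[:3], w[3:]])
assert ids == [1, -100, 0, -100]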
| 491 | 0 |
import operator as op
a = 'scaler.pt'
a = 'pytorch_model'
a = 'random_states'
a = 'optimizer'
a = 'scheduler'
a = 'pytorch_model.bin'
a = 'pytorch_model.bin.index.json'
a = 'model.safetensors'
a = 'model.safetensors.index.json'
a = '1.10.2'
a = 'py38'
a = '4.17.0'
a = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
a = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
a = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
a = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
a = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
a = '2.0.1'
a = ['pdsh', 'standard', 'openmpi', 'mvapich']
a = ['default', 'reduce-overhead', 'max-autotune']
a = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
a = [
'nnodes',
'nproc_per_node',
'rdzv_backend',
'rdzv_endpoint',
'rdzv_id',
'rdzv_conf',
'standalone',
'max_restarts',
'monitor_interval',
'start_method',
'role',
'module',
'm',
'no_python',
'run_path',
'log_dir',
'r',
'redirects',
't',
'tee',
'node_rank',
'master_addr',
'master_port',
]
a = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
a = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
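# The operator map above drives string-keyed version checks; a minimal sketch of
# that pattern (accelerate's own helper has a different name and signature):
import operator as op
from packaging.version import parse

STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

def compare_versions(version, operation, reference):
    return STR_OPERATION_TO_FUNC[operation](parse(version), parse(reference))

assert compare_versions("2.0.1", ">=", "1.10.2")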
| 412 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
a = logging.get_logger(__name__)
@add_end_docstrings(__magic_name__ )
class UpperCamelCase__ ( __magic_name__ ):
def __init__( self : List[Any] , **UpperCamelCase__ : Any ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
if self.framework != "pt":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self : Tuple , UpperCamelCase__ : Union[np.ndarray, bytes, str] , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
return super().__call__(UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : Dict , **UpperCamelCase__ : int ):
'''simple docstring'''
lowercase_ = {}
if "candidate_labels" in kwargs:
lowercase_ = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
lowercase_ = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : int="This is a sound of {}." ):
'''simple docstring'''
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
lowercase_ = requests.get(UpperCamelCase__ ).content
else:
with open(UpperCamelCase__ , """rb""" ) as f:
lowercase_ = f.read()
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowercase_ = ffmpeg_read(UpperCamelCase__ , self.feature_extractor.sampling_rate )
if not isinstance(UpperCamelCase__ , np.ndarray ):
raise ValueError("""We expect a numpy ndarray as input""" )
if len(audio.shape ) != 1:
raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
lowercase_ = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" )
lowercase_ = candidate_labels
lowercase_ = [hypothesis_template.format(UpperCamelCase__ ) for x in candidate_labels]
lowercase_ = self.tokenizer(UpperCamelCase__ , return_tensors=self.framework , padding=UpperCamelCase__ )
lowercase_ = [text_inputs]
return inputs
def UpperCAmelCase__ ( self : Dict , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
lowercase_ = model_inputs.pop("""candidate_labels""" )
lowercase_ = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , UpperCamelCase__ ):
lowercase_ = text_inputs[0]
else:
# Batching case.
lowercase_ = text_inputs[0][0]
lowercase_ = self.model(**UpperCamelCase__ , **UpperCamelCase__ )
lowercase_ = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_audio,
}
return model_outputs
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Any ):
'''simple docstring'''
lowercase_ = model_outputs.pop("""candidate_labels""" )
lowercase_ = model_outputs["""logits"""][0]
if self.framework == "pt":
lowercase_ = logits.softmax(dim=0 )
lowercase_ = probs.tolist()
else:
raise ValueError("""`tf` framework not supported.""" )
lowercase_ = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(UpperCamelCase__ , UpperCamelCase__ ) , key=lambda UpperCamelCase__ : -x[0] )
]
return result
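# Hedged usage sketch of the pipeline above; assumes a CLAP-style checkpoint and
# audio resampled to the model's rate (48 kHz here is an assumption).
import numpy as np
from transformers import pipeline

classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
audio = np.zeros(48_000, dtype=np.float32)  # one second of silence as a stand-in
preds = classifier(audio, candidate_labels=["dog barking", "vacuum cleaner"])
print(preds[0]["label"], preds[0]["score"])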
| 412 | 1 |
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 1000000,
"gigajoule": 1000000000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 3600000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 4186800.00,
"electronvolt": 1.6_0_2_1_7_6_6_3_4e-1_9,
"britishthermalunit_it": 1055.05585,
"footpound": 1.35_5818,
}
def energy_conversion( from_type: str , to_type: str , value: float ) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            f"""Valid values are: {", ".join(ENERGY_CONVERSION )}"""
        )
        raise ValueError(msg )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
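# Two spot checks of the factor table: scale into joules via the "from" factor,
# then back out via the "to" factor.
assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0
assert round(energy_conversion("calorie_nutr", "kilocalorie_nutr", 1), 6) == 0.001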
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 17 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]:
self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
for a, b in zip(__lowerCamelCase , __lowerCamelCase ):
self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
A : List[Any] = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(__lowerCamelCase ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
A : Union[str, Any] = None
ops.enable_eager_execution_internal()
A : Tuple = tf.config.list_physical_devices("CPU" )
if len(__lowerCamelCase ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
A : Dict = tf.config.list_logical_devices(device_type="CPU" )
A : List[str] = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
A : Optional[int] = GradientAccumulator()
A : Tuple = tf.Variable([4.0, 3.0] )
A , A : List[Any] = create_optimizer(5e-5 , 10 , 5 )
A : List[str] = tf.Variable([0.0, 0.0] , trainable=__lowerCamelCase )
def accumulate_on_replica(__lowerCamelCase : Tuple ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(__lowerCamelCase : Any , __lowerCamelCase : Optional[int] ):
with strategy.scope():
A : int = strategy.experimental_local_results(__lowerCamelCase )
local_variables[0].assign(__lowerCamelCase )
local_variables[1].assign(__lowerCamelCase )
strategy.run(__lowerCamelCase , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(__lowerCamelCase )
def _check_local_values(__lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ):
A : Optional[int] = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , __lowerCamelCase , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , __lowerCamelCase , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
        _check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 17 | 1 |
from collections import deque
def tarjan( g ):
    n_vertices = len(g )
    stack = deque()
    on_stack = [False for _ in range(n_vertices )]
    index_of = [-1 for _ in range(n_vertices )]
    lowlink_of = index_of[:]

    def strong_connect( v , index , components ):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v )
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w , index , components )
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w )
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w )
            components.append(component )
        return index

    components = []
    for v in range(n_vertices ):
        if index_of[v] == -1:
            strong_connect(v , 0 , components )
    return components


def create_graph( n_vertices , edges ):
    g = [[] for _ in range(n_vertices )]
    for u, v in edges:
        g[u].append(v )
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 311 | class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 neighbours of element (i, j)
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
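# A hypothetical usage sketch (not part of the original file): count islands of
# 1s in a small grid. Diagonal neighbours count as connected, so the three 1s
# in the top-left corner form one island and the lone 1 forms another.
def _demo_count_islands():
    demo_grid = [
        [1, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
    ]
    assert Matrix(3, 4, demo_grid).count_islands() == 2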
| 403 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights to the Transformers design.
    """
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = TaTokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blipa_config(model_name)

    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    lavis_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    hf_model_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
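# A self-contained sketch (hypothetical values, not part of the original
# script) of the rename-keys mechanism above: each (src, dest) pair pops the
# old key from the state dict and reinserts the same value under the new key,
# so only the naming scheme changes.
def _demo_rename_key_pattern():
    demo_state = {"visual_encoder.cls_token": 1, "ln_vision.weight": 2}
    for src, dest in [
        ("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"),
        ("ln_vision.weight", "vision_model.post_layernorm.weight"),
    ]:
        rename_key(demo_state, src, dest)
    assert demo_state == {
        "vision_model.embeddings.class_embedding": 1,
        "vision_model.post_layernorm.weight": 2,
    }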
if __name__ == "__main__":
a_ : List[Any] = argparse.ArgumentParser()
a_ : Optional[Any] = [
"instructblip-vicuna-7b",
"instructblip-vicuna-13b",
"instructblip-flan-t5-xl",
"instructblip-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
a_ : Optional[int] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 532 |
"""
Use:
    solver = EditDistance()
    edit_distance = solver.min_dist_top_down(word1, word2)
"""


class EditDistance:
    def __init__(self) -> None:
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m, n) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 532 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
return 3_2
@property
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
return 3_2
@property
def SCREAMING_SNAKE_CASE_ ( self :int ):
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
return 1_0_0
@property
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : int = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__SCREAMING_SNAKE_CASE : int = UNetaDConditionModel(**_lowerCamelCase )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self :Any ):
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] ):
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE : Any = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
__SCREAMING_SNAKE_CASE : int = self.dummy_unet
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_movq
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0_0_8_5,
'''beta_end''': 0.0_1_2,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
__SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler(**_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] , _lowerCamelCase :Any , _lowerCamelCase :Optional[Any]=0 ):
__SCREAMING_SNAKE_CASE : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCamelCase )
# create init_image
__SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(_lowerCamelCase ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
# create hint
__SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_lowerCamelCase ) ).to(_lowerCamelCase )
if str(_lowerCamelCase ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE : str = torch.manual_seed(_lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 1_0,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self :int ):
__SCREAMING_SNAKE_CASE : int = '''cpu'''
__SCREAMING_SNAKE_CASE : str = self.get_dummy_components()
__SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**self.get_dummy_inputs(_lowerCamelCase ) )
__SCREAMING_SNAKE_CASE : List[Any] = output.images
__SCREAMING_SNAKE_CASE : Dict = pipe(
**self.get_dummy_inputs(_lowerCamelCase ) , return_dict=_lowerCamelCase , )[0]
__SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
[0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyVaaControlnetImgaImgPipelineIntegrationTests(unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self :List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self :int ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
__SCREAMING_SNAKE_CASE : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
__SCREAMING_SNAKE_CASE : Any = init_image.resize((5_1_2, 5_1_2) )
__SCREAMING_SNAKE_CASE : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
__SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(np.array(_lowerCamelCase ) ).float() / 2_5_5.0
__SCREAMING_SNAKE_CASE : Any = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
__SCREAMING_SNAKE_CASE : Dict = '''A robot, 4k photo'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCamelCase )
__SCREAMING_SNAKE_CASE : int = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline.to(_lowerCamelCase )
pipeline.set_progress_bar_config(disable=_lowerCamelCase )
__SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = pipe_prior(
_lowerCamelCase , image=_lowerCamelCase , strength=0.8_5 , generator=_lowerCamelCase , negative_prompt='''''' , ).to_tuple()
__SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline(
image=_lowerCamelCase , image_embeds=_lowerCamelCase , negative_image_embeds=_lowerCamelCase , hint=_lowerCamelCase , generator=_lowerCamelCase , num_inference_steps=1_0_0 , height=5_1_2 , width=5_1_2 , strength=0.5 , output_type='''np''' , )
__SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(_lowerCamelCase , _lowerCamelCase )
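# A minimal numpy sketch (illustrative only, not part of the test file) of the
# slice-comparison pattern used above: fast pipeline tests check only the
# bottom-right 3x3 corner of the last channel against stored reference values.
def _slice_close(image, expected_slice, tol=1e-2):
    image_slice = image[0, -3:, -3:, -1]
    return np.abs(image_slice.flatten() - expected_slice).max() < tol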
| 674 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def lowerCAmelCase_ ( lowercase_ : Callable , lowercase_ : float , lowercase_ : float , lowercase_ : float , lowercase_ : float ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = int(np.ceil((x_end - xa) / step_size ) )
__SCREAMING_SNAKE_CASE : Dict = np.zeros((n + 1,) )
__SCREAMING_SNAKE_CASE : List[Any] = ya
__SCREAMING_SNAKE_CASE : Dict = xa
for k in range(lowercase_ ):
__SCREAMING_SNAKE_CASE : str = y[k] + step_size * ode_func(lowercase_ , y[k] )
__SCREAMING_SNAKE_CASE : int = y[k] + (
(step_size / 2) * (ode_func(lowercase_ , y[k] ) + ode_func(x + step_size , lowercase_ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
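# A hypothetical usage sketch (not from the original file): integrate
# dy/dx = y with y(0) = 1 over [0, 1]; the final sample should approximate
# e ~ 2.71828 thanks to the second-order trapezoidal corrector.
def _demo_euler_modified():
    y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    assert abs(y[-1] - np.e) < 1e-3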
| 674 | 1 |
"""simple docstring"""
from __future__ import annotations
def __UpperCAmelCase ( _snake_case : Dict, _snake_case : Tuple, _snake_case : Optional[Any], _snake_case : str ):
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
_lowercase = array[indexa], array[indexa]
def __UpperCAmelCase ( _snake_case : Union[str, Any], _snake_case : Optional[Any], _snake_case : Dict, _snake_case : Optional[Any] ):
if length > 1:
_lowercase = int(length / 2 )
for i in range(lowerCAmelCase_, low + middle ):
comp_and_swap(lowerCAmelCase_, lowerCAmelCase_, i + middle, lowerCAmelCase_ )
bitonic_merge(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
bitonic_merge(lowerCAmelCase_, low + middle, lowerCAmelCase_, lowerCAmelCase_ )
def __UpperCAmelCase ( _snake_case : Optional[Any], _snake_case : Optional[Any], _snake_case : Dict, _snake_case : Optional[Any] ):
if length > 1:
_lowercase = int(length / 2 )
bitonic_sort(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, 1 )
bitonic_sort(lowerCAmelCase_, low + middle, lowerCAmelCase_, 0 )
bitonic_merge(lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ )
if __name__ == "__main__":
__UpperCamelCase : int = input("Enter numbers separated by a comma:\n").strip()
__UpperCamelCase : Optional[Any] = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("\nSorted array in ascending order is: ", end="")
print(*unsorted, sep=", ")
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("Sorted array in descending order is: ", end="")
print(*unsorted, sep=", ") | 707 | """simple docstring"""
| 707 |
"""
Project Euler Problem 135: https://projecteuler.net/problem=135

With x, y, z in arithmetic progression (x = y + d, z = y - d), the equation
x^2 - y^2 - z^2 = n reduces to n = y * (4d - y). Every solution therefore
corresponds to a divisor y of n for which d = (y + n / y) / 4 is an integer
with y > d (so that z > 0) and y < 4d (so that n > 0).
"""


def solution(limit: int = 1000000) -> int:
    """Count the values of n below one million with exactly ten solutions."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # (y + n / y) must be divisible by 4 for d to be an integer
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # z > 0 needs y > d, and n > 0 needs y < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
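# A brute-force cross-check (a hypothetical helper, not in the original file):
# count solutions directly over small (y, d) pairs using the identity
# (y + d)^2 - y^2 - (y - d)^2 = y * (4d - y); it should match the divisor-based
# counting above, e.g. _count_solutions_brute_force(27) == 2.
def _count_solutions_brute_force(n: int, search_limit: int = 1000) -> int:
    count = 0
    for y in range(1, search_limit):
        for d in range(1, y):  # z = y - d must stay positive
            if (y + d) ** 2 - y**2 - (y - d) ** 2 == n:
                count += 1
    return count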
| 227 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    r"""
    Constructs a CLIP image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
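# A hypothetical usage sketch (assumes the vision extras are installed; not
# part of the file above): run the processor end to end on a synthetic PIL
# image and check the CLIP-standard output shape (batch, channels, 224, 224).
def _demo_clip_image_processor():
    image = PIL.Image.new("RGB", (640, 480))
    processor = CLIPImageProcessor()
    pixel_values = processor(images=image, return_tensors="np").pixel_values
    assert pixel_values.shape == (1, 3, 224, 224)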
| 23 |
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()

"""
The function run by the processes that sorts the list

position = the position in the list the process represents, used to know which
           neighbor we pass our value to
value = the initial value at list[position]
l_send, r_send = the pipes we use to send to our left and right neighbors
lr_cv, rr_cv = the pipes we use to receive from our left and right neighbors
result_pipe = the pipe used to send results back to main
"""


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
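# A sequential reference implementation (a sketch, not part of the original
# file): the same odd-even transposition schedule without processes, useful for
# checking the parallel version. n phases always suffice for n elements.
def odd_even_transposition_sequential(arr):
    arr = list(arr)
    n = len(arr)
    for phase in range(n):
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr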
| 631 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
"""simple docstring"""
def __init__( self : Any , __A : List[Any] , __A : int=1_3 , __A : List[Any]=3_0 , __A : Dict=2 , __A : Any=3 , __A : List[Any]=True , __A : List[str]=True , __A : Optional[int]=3_2 , __A : List[str]=5 , __A : int=4 , __A : int=3_7 , __A : List[Any]="gelu" , __A : int=0.1 , __A : List[Any]=0.1 , __A : Optional[Any]=1_0 , __A : Optional[int]=0.0_2 , __A : Union[str, Any]=None , __A : List[Any]=2 , ):
"""simple docstring"""
_lowercase = parent
_lowercase = batch_size
_lowercase = image_size
_lowercase = patch_size
_lowercase = num_channels
_lowercase = is_training
_lowercase = use_labels
_lowercase = hidden_size
_lowercase = num_hidden_layers
_lowercase = num_attention_heads
_lowercase = intermediate_size
_lowercase = hidden_act
_lowercase = hidden_dropout_prob
_lowercase = attention_probs_dropout_prob
_lowercase = type_sequence_label_size
_lowercase = initializer_range
_lowercase = scope
_lowercase = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowercase = (image_size // patch_size) ** 2
_lowercase = num_patches + 1
def snake_case ( self : List[str] ):
"""simple docstring"""
_lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase = None
if self.use_labels:
_lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase = self.get_config()
return config, pixel_values, labels
def snake_case ( self : List[str] ):
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def snake_case ( self : Tuple , __A : int , __A : str , __A : List[str] ):
"""simple docstring"""
_lowercase = ViTModel(config=__A )
model.to(__A )
model.eval()
_lowercase = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : List[Any] , __A : Tuple , __A : List[str] , __A : Optional[int] ):
"""simple docstring"""
_lowercase = ViTForMaskedImageModeling(config=__A )
model.to(__A )
model.eval()
_lowercase = model(__A )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowercase = 1
_lowercase = ViTForMaskedImageModeling(__A )
model.to(__A )
model.eval()
_lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowercase = model(__A )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def snake_case ( self : Optional[int] , __A : List[str] , __A : Optional[int] , __A : int ):
"""simple docstring"""
_lowercase = self.type_sequence_label_size
_lowercase = ViTForImageClassification(__A )
model.to(__A )
model.eval()
_lowercase = model(__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowercase = 1
_lowercase = ViTForImageClassification(__A )
model.to(__A )
model.eval()
_lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowercase = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case ( self : Optional[int] ):
"""simple docstring"""
_lowercase = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) = config_and_inputs
_lowercase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViT does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def snake_case ( self : Dict ):
"""simple docstring"""
_lowercase = ViTModelTester(self )
_lowercase = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=3_7 )
def snake_case ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def snake_case ( self : Tuple ):
"""simple docstring"""
pass
def snake_case ( self : Optional[int] ):
"""simple docstring"""
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A , nn.Linear ) )
def snake_case ( self : str ):
"""simple docstring"""
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
_lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase = [*signature.parameters.keys()]
_lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __A )
def snake_case ( self : Any ):
"""simple docstring"""
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def snake_case ( self : Optional[int] ):
"""simple docstring"""
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__A )
def snake_case ( self : str ):
"""simple docstring"""
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase = ViTModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def snake_case ( self : str ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def snake_case ( self : str ):
"""simple docstring"""
_lowercase = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(__A )
_lowercase = self.default_image_processor
_lowercase = prepare_img()
_lowercase = image_processor(images=__A , return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
_lowercase = model(**__A )
# verify the logits
_lowercase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __A )
_lowercase = torch.tensor([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1e-4 ) )
@slow
def snake_case ( self : Any ):
"""simple docstring"""
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
_lowercase = ViTModel.from_pretrained("facebook/dino-vits8" ).to(__A )
_lowercase = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=4_8_0 )
_lowercase = prepare_img()
_lowercase = image_processor(images=__A , return_tensors="pt" )
_lowercase = inputs.pixel_values.to(__A )
# forward pass
with torch.no_grad():
_lowercase = model(__A , interpolate_pos_encoding=__A )
# verify the logits
_lowercase = torch.Size((1, 3_6_0_1, 3_8_4) )
self.assertEqual(outputs.last_hidden_state.shape , __A )
_lowercase = torch.tensor(
[[4.2_3_4_0, 4.3_9_0_6, -6.6_6_9_2], [4.5_4_6_3, 1.8_9_2_8, -6.7_2_5_7], [4.4_4_2_9, 0.8_4_9_6, -5.8_5_8_5]] ).to(__A )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __A , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def snake_case ( self : Any ):
"""simple docstring"""
_lowercase = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
_lowercase = self.default_image_processor
_lowercase = prepare_img()
_lowercase = image_processor(images=__A , return_tensors="pt" )
_lowercase = inputs.pixel_values.to(__A )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_lowercase = model(__A )
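# A small sanity sketch (hypothetical helper, not part of the test file): the
# expected hidden-state length 3601 above is the ViT patch grid plus the [CLS]
# token, i.e. (480 // 8) ** 2 + 1 for a 480x480 image with 8x8 patches.
def _expected_vit_seq_len(image_size: int, patch_size: int) -> int:
    return (image_size // patch_size) ** 2 + 1


assert _expected_vit_seq_len(480, 8) == 3601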
| 602 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
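# A toy sketch (hypothetical helper, not in the test file) of the BPE idea the
# vocab above encodes: split a word into characters (marking the end with
# </w>), then apply merges in priority order until none fit, which turns
# "lower" into the ["low", "er</w>"] tokens asserted in test_full_tokenizer.
def _toy_bpe(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    for a, b in merges:
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == a and symbols[i + 1] == b:
                symbols[i : i + 2] = [a + b]
            else:
                i += 1
    return symbols


assert _toy_bpe("lower", [("l", "o"), ("lo", "w"), ("e", "r</w>")]) == ["low", "er</w>"]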
| 602 | 1 |
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 462 |
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=False)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
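# A hypothetical usage sketch (not part of the original file): any object whose
# dotted import path and constructor kwargs live in a config dict can be built
# through instantiate_from_config, e.g. a plain OrderedDict standing in for a
# model class.
def _demo_instantiate_from_config():
    demo_config = {"target": "collections.OrderedDict", "params": {}}
    demo_obj = instantiate_from_config(demo_config)
    assert type(demo_obj).__name__ == "OrderedDict"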
| 462 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
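# A toy sketch (illustrative only, not the transformers implementation) of the
# lazy-import idea behind _LazyModule: attribute access triggers the real
# submodule import the first time a symbol is needed.
import importlib
import types


class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, name):
        if name not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module("." + self._class_to_module[name], self.__name__)
        return getattr(submodule, name)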
| 715 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear module with a LoRA-style low-rank adapter - for testing purposes only."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A_ ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
_lowerCamelCase : Any = """bigscience/bloom-1b7"""
# Constant values
_lowerCamelCase : Optional[int] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
_lowerCamelCase : str = """Hello my name is"""
_lowerCamelCase : List[Any] = set()
EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" )
EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" )
EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" )
_lowerCamelCase : List[Any] = 10
def lowercase ( self : Dict ):
# Models and tokenizer
_UpperCAmelCase = AutoTokenizer.from_pretrained(self.model_name )
class A_ ( lowerCAmelCase_ ):
def lowercase ( self : str ):
super().setUp()
# Models and tokenizer
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="auto" )
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=snake_case_ , device_map="auto" )
def lowercase ( self : Any ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)
    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
    def test_generate_quality_config(self):
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=quantization_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )
    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries casting the whole model to float
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries casting the whole model to half
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()
    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules
    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb
        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
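# A standalone sketch of the loading path these tests exercise (illustrative only; it
# assumes a CUDA device and the `bitsandbytes` package, and the helper name is made up):
def _load_model_in_4bit_demo(model_name: str = "bigscience/bloom-560m"):
    quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
    return AutoModelForCausalLM.from_pretrained(
        model_name, quantization_config=quantization_config, device_map="auto"
    )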
| 119 | 0 |
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
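# An alternative sketch (added for illustration, not part of the original task): every third
# Fibonacci number is even, and the even-valued terms satisfy E(k) = 4 * E(k-1) + E(k-2),
# so they can be walked directly without testing parity.
def solution_even_recurrence(n: int = 4_000_000) -> int:
    total = 0
    a, b = 2, 8  # the first two even Fibonacci numbers
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total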
if __name__ == "__main__":
print(F'''{solution() = }''')
| 107 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowercase__ : Dict = "sshleifer/mar_enro_6_3_student"
class lowerCamelCase ( lowerCamelCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
super().setUp()
UpperCAmelCase_ = cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=UpperCAmelCase__ , )
UpperCAmelCase_ = f"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""
    @slow
    @require_torch_gpu
    def test_model_download(self):
        MarianMTModel.from_pretrained(MARIAN_MODEL)
    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }
        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
""".split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)
        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")
for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
['''distillation.py''']
+ bash_script.split()
+ [
f"""--output_dir={output_dir}""",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"""--num_train_epochs={epochs}""",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)
        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , UpperCAmelCase__ )
        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"][expected_key].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
| 390 | 0 |
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
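# How the quine above works: the format string is passed to itself, `%r` reproduces the
# string literal with its quotes, and the doubled `%%` collapses back to a single `%`,
# so the program's output is exactly its own source line.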
| 710 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """Segment Tree constructor; works with any commutative combiner function."""
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Update an element in O(log N) time."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Get range query value in O(log N) time."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
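# A small worked example of the flat, 1-indexed layout used above (illustrative only):
# for arr = [10, 20, 30, 40] and fn = min, self.st ends up as
#
#     index: 1   2   3   4   5   6   7
#     value: 10  10  30  10  20  30  40
#
# leaves occupy st[N:], st[p] combines st[2p] and st[2p+1], and st[1] covers the whole array.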
if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Test all possible segments against a plain `reduce` over the same slice."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
| 516 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : Tuple ={"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict =[
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 113 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """:return: Number of forward references."""
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)
        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """:return: Random level from [1, self.max_level]; higher values are less likely."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """
        :param key: Searched key.
        :return: Tuple of the searched node (or None if the key is not present) and the
                 list of nodes per level whose references would have to be updated.
        """
        update_vector = []
        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
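# A short usage sketch of the class above (illustrative; not part of the test suite below):
def _skip_list_demo() -> None:
    sl = SkipList[str, int]()
    sl.insert("a", 1)
    sl.insert("b", 2)
    assert sl.find("a") == 1
    sl.delete("a")
    assert sl.find("a") is None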
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()


def main():
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 585 | 0 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
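# Note: `AutoModelForCausalLM.from_config(config)` builds a randomly initialized model of
# the configured size (unlike `from_pretrained`, which also loads weights), so this script
# pushes a fresh, untrained checkpoint. A minimal sketch of the same idea, with
# hypothetical values:
#
#     config = AutoConfig.from_pretrained("gpt2-large", vocab_size=32768)
#     model = AutoModelForCausalLM.from_config(config)  # random weights, gpt2-large shape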
| 188 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
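# Usage sketch (illustrative): merging keeps the result sorted because the constructor
# re-sorts whatever iterable it is given.
#
#     >>> list(merge_lists(SortedLinkedList([3, 1]), SortedLinkedList([2])))
#     [1, 2, 3]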
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase : Optional[Any] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 188 | 1 |
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)

        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )

        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
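# Sanity-check sketch (added for illustration, not in the original): at redshift z = 0
# every (1 + z) factor is 1, so E(0) = 1 when the densities sum to one and the function
# should return the Hubble constant itself.
def _hubble_at_z0_demo() -> None:
    h0 = 68.3
    assert abs(hubble_parameter(h0, 1e-4, 0.3, 1 - 0.3 - 1e-4, 0) - h0) < 1e-9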
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 96 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
__lowerCamelCase = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot for text classification by turning each possible label into an NLI
    premise/hypothesis pair.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels
    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(hypothesis_template) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )
    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : Tuple , __snake_case : List[Any]=True , __snake_case : Tuple=True , __snake_case : Tuple=TruncationStrategy.ONLY_FIRST , **__snake_case : Union[str, Any] ) -> int:
__magic_name__: Optional[int] = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
__magic_name__: Tuple = self.tokenizer.eos_token
try:
__magic_name__: List[Any] = self.tokenizer(
__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , padding=__snake_case , truncation=__snake_case , )
except Exception as e:
if "too short" in str(__snake_case ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
__magic_name__: List[str] = self.tokenizer(
__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , padding=__snake_case , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def lowerCamelCase__ ( self : Any , **__snake_case : Optional[Any] ) -> int:
if kwargs.get("""multi_class""" , __snake_case ) is not None:
__magic_name__: List[str] = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
__magic_name__: Optional[Any] = {}
if "candidate_labels" in kwargs:
__magic_name__: str = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
__magic_name__: Optional[Any] = kwargs["""hypothesis_template"""]
__magic_name__: int = {}
if "multi_label" in kwargs:
__magic_name__: int = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self : Any , __snake_case : Union[str, List[str]] , *__snake_case : Optional[int] , **__snake_case : List[str] , ) -> List[str]:
if len(__snake_case ) == 0:
pass
elif len(__snake_case ) == 1 and "candidate_labels" not in kwargs:
__magic_name__: str = args[0]
else:
raise ValueError(F'Unable to understand extra arguments {args}' )
return super().__call__(__snake_case , **__snake_case )
def lowerCamelCase__ ( self : Any , __snake_case : int , __snake_case : List[Any]=None , __snake_case : Tuple="This example is {}." ) -> List[Any]:
__magic_name__, __magic_name__: int = self._args_parser(__snake_case , __snake_case , __snake_case )
for i, (candidate_label, sequence_pair) in enumerate(zip(__snake_case , __snake_case ) ):
__magic_name__: Dict = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(__snake_case ) - 1,
**model_input,
}
def lowerCamelCase__ ( self : Dict , __snake_case : int ) -> Tuple:
__magic_name__: Optional[Any] = inputs["""candidate_label"""]
__magic_name__: List[str] = inputs["""sequence"""]
__magic_name__: Any = {k: inputs[k] for k in self.tokenizer.model_input_names}
__magic_name__: Optional[int] = self.model(**__snake_case )
__magic_name__: Optional[Any] = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def lowerCamelCase__ ( self : Optional[int] , __snake_case : str , __snake_case : List[Any]=False ) -> Any:
__magic_name__: List[Any] = [outputs["""candidate_label"""] for outputs in model_outputs]
__magic_name__: Any = [outputs["""sequence"""] for outputs in model_outputs]
__magic_name__: Tuple = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
__magic_name__: Any = logits.shape[0]
__magic_name__: Any = len(__snake_case )
__magic_name__: List[str] = N // n
__magic_name__: int = logits.reshape((num_sequences, n, -1) )
if multi_label or len(__snake_case ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
__magic_name__: List[Any] = self.entailment_id
__magic_name__: List[str] = -1 if entailment_id == 0 else 0
__magic_name__: Tuple = reshaped_outputs[..., [contradiction_id, entailment_id]]
__magic_name__: int = np.exp(__snake_case ) / np.exp(__snake_case ).sum(-1 , keepdims=__snake_case )
__magic_name__: List[str] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
__magic_name__: List[str] = reshaped_outputs[..., self.entailment_id]
__magic_name__: Tuple = np.exp(__snake_case ) / np.exp(__snake_case ).sum(-1 , keepdims=__snake_case )
__magic_name__: Optional[Any] = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
| 96 | 1 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = "src/diffusers"

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    """Returns the indent in `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ):
lowerCAmelCase_ : Tuple =0
lowerCAmelCase_ : List[Any] =code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(_SCREAMING_SNAKE_CASE ):
index += 1
lowerCAmelCase_ : Tuple =['''\n'''.join(lines[:index] )]
else:
lowerCAmelCase_ : Tuple =[]
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase_ : Union[str, Any] =[lines[index]]
index += 1
while index < len(_SCREAMING_SNAKE_CASE ) and (end_prompt is None or not lines[index].startswith(_SCREAMING_SNAKE_CASE )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_SCREAMING_SNAKE_CASE ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(_SCREAMING_SNAKE_CASE ) )
if index < len(_SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase_ : str =[lines[index + 1]]
index += 1
else:
lowerCAmelCase_ : Any =[]
else:
blocks.append('''\n'''.join(_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase_ : Dict =[lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_SCREAMING_SNAKE_CASE ) > 0:
blocks.append('''\n'''.join(_SCREAMING_SNAKE_CASE ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_SCREAMING_SNAKE_CASE ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    """Wraps a `key` function (that maps an object to a string) to lower case and ignore underscores."""

    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str."""

    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
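# Ordering sketch (illustrative): constants come first, then classes, then functions, with
# leading underscores ignored for tie-breaking within each group.
#
#     >>> sort_objects(["my_func", "MyClass", "MY_CONST", "_helper"])
#     ['MY_CONST', 'MyClass', '_helper', 'my_func']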
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ):
# This inner function sort imports between [ ].
def _replace(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase_ : List[str] =match.groups()[0]
if "," not in imports:
return f'[{imports}]'
lowerCAmelCase_ : str =[part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase_ : Tuple =keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(_SCREAMING_SNAKE_CASE )] ) + "]"
lowerCAmelCase_ : int =import_statement.split('''\n''' )
if len(_SCREAMING_SNAKE_CASE ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase_ : List[Any] =2 if lines[1].strip() == '''[''' else 1
lowerCAmelCase_ : Union[str, Any] =[(i, _re_strip_line.search(_SCREAMING_SNAKE_CASE ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCAmelCase_ : List[Any] =sort_objects(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x[1] )
lowerCAmelCase_ : str =[lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_SCREAMING_SNAKE_CASE ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase_ : Union[str, Any] =_re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase_ : str =[part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase_ : str =keys[:-1]
lowerCAmelCase_ : Optional[Any] =get_indent(lines[1] ) + ''', '''.join([f'"{k}"' for k in sort_objects(_SCREAMING_SNAKE_CASE )] )
return "\n".join(_SCREAMING_SNAKE_CASE )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase_ : str =_re_bracket_content.sub(_replace , _SCREAMING_SNAKE_CASE )
return import_statement
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
with open(_SCREAMING_SNAKE_CASE , '''r''' ) as f:
lowerCAmelCase_ : List[str] =f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCAmelCase_ : Optional[Any] =split_code_in_indented_blocks(
_SCREAMING_SNAKE_CASE , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCAmelCase_ : Optional[Any] =main_blocks[block_idx]
lowerCAmelCase_ : Union[str, Any] =block.split('''\n''' )
# Get to the start of the imports.
lowerCAmelCase_ : str =0
while line_idx < len(_SCREAMING_SNAKE_CASE ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCAmelCase_ : Tuple =len(_SCREAMING_SNAKE_CASE )
else:
line_idx += 1
if line_idx >= len(_SCREAMING_SNAKE_CASE ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCAmelCase_ : str ='''\n'''.join(block_lines[line_idx:-1] )
lowerCAmelCase_ : Union[str, Any] =get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
lowerCAmelCase_ : List[str] =split_code_in_indented_blocks(_SCREAMING_SNAKE_CASE , indent_level=_SCREAMING_SNAKE_CASE )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCAmelCase_ : Union[str, Any] =_re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCAmelCase_ : List[str] =[(pattern.search(_SCREAMING_SNAKE_CASE ).groups()[0] if pattern.search(_SCREAMING_SNAKE_CASE ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCAmelCase_ : Union[str, Any] =[(i, key) for i, key in enumerate(_SCREAMING_SNAKE_CASE ) if key is not None]
lowerCAmelCase_ : str =[x[0] for x in sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCAmelCase_ : str =0
lowerCAmelCase_ : Union[str, Any] =[]
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
lowerCAmelCase_ : Dict =sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(_SCREAMING_SNAKE_CASE )
count += 1
# And we put our main block back together with its first and last line.
lowerCAmelCase_ : Optional[int] ='''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(_SCREAMING_SNAKE_CASE ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(_SCREAMING_SNAKE_CASE , '''w''' ) as f:
f.write('''\n'''.join(_SCREAMING_SNAKE_CASE ) )
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
| 305 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
def __A ( self : int ):
lowerCAmelCase_ : Any =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : Any =None
lowerCAmelCase_ : Optional[Any] =None
if self.use_labels:
lowerCAmelCase_ : Optional[int] =ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase_ : Optional[Any] =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCAmelCase_ : str =self.get_config()
return config, pixel_values, labels, pixel_labels
def __A ( self : Optional[Any] ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __A ( self : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : Any ):
lowerCAmelCase_ : Optional[Any] =MobileViTModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase_ : Optional[Any] =model(UpperCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __A ( self : Dict , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : Any ):
lowerCAmelCase_ : List[str] =self.num_labels
lowerCAmelCase_ : Tuple =MobileViTForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase_ : str =model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple ):
lowerCAmelCase_ : Optional[int] =self.num_labels
lowerCAmelCase_ : Any =MobileViTForSemanticSegmentation(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase_ : Union[str, Any] =model(UpperCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
lowerCAmelCase_ : Union[str, Any] =model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __A ( self : Dict ):
lowerCAmelCase_ : Any =self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int =config_and_inputs
lowerCAmelCase_ : Tuple ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_headmasking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
def __A ( self : Dict ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViT does not use inputs_embeds''' )
def __A ( self : int ):
pass
@unittest.skip(reason='''MobileViT does not support input and output embeddings''' )
def __A ( self : Optional[int] ):
pass
@unittest.skip(reason='''MobileViT does not output attentions''' )
def __A ( self : List[str] ):
pass
def __A ( self : Tuple ):
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Optional[Any] =model_class(UpperCamelCase_ )
lowerCAmelCase_ : List[str] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : List[str] =[*signature.parameters.keys()]
lowerCAmelCase_ : Tuple =['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __A ( self : Tuple ):
pass
def __A ( self : Optional[int] ):
lowerCAmelCase_ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def __A ( self : Dict ):
def check_hidden_states_output(UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] ):
lowerCAmelCase_ : Union[str, Any] =model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ : List[str] =model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase_ : Dict =outputs.hidden_states
lowerCAmelCase_ : Union[str, Any] =5
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
lowerCAmelCase_ : Union[str, Any] =2
for i in range(len(UpperCamelCase_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Any =True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ : List[str] =True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def __A ( self : int ):
lowerCAmelCase_ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
def __A ( self : Optional[Any] ):
lowerCAmelCase_ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase_ )
@slow
def __A ( self : List[Any] ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Any =MobileViTModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def SCREAMING_SNAKE_CASE__ ( ):
lowerCAmelCase_ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __A ( self : Dict ):
return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None
@slow
def __A ( self : Union[str, Any] ):
lowerCAmelCase_ : Tuple =MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(UpperCamelCase_ )
lowerCAmelCase_ : Optional[Any] =self.default_image_processor
lowerCAmelCase_ : Optional[int] =prepare_img()
lowerCAmelCase_ : Union[str, Any] =image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase_ : int =model(**UpperCamelCase_ )
# verify the logits
lowerCAmelCase_ : Optional[int] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
lowerCAmelCase_ : List[Any] =torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
@slow
def __A ( self : Union[str, Any] ):
lowerCAmelCase_ : List[str] =MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
lowerCAmelCase_ : Tuple =model.to(UpperCamelCase_ )
lowerCAmelCase_ : Any =MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
lowerCAmelCase_ : int =prepare_img()
lowerCAmelCase_ : Optional[int] =image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase_ : Optional[Any] =model(**UpperCamelCase_ )
lowerCAmelCase_ : List[str] =outputs.logits
# verify the logits
lowerCAmelCase_ : Dict =torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , UpperCamelCase_ )
lowerCAmelCase_ : List[str] =torch.tensor(
[
[[6.9_7_1_3, 6.9_7_8_6, 7.2_4_2_2], [7.2_8_9_3, 7.2_8_2_5, 7.4_4_4_6], [7.6_5_8_0, 7.8_7_9_7, 7.9_4_2_0]],
[[-1_0.6_8_6_9, -1_0.3_2_5_0, -1_0.3_4_7_1], [-1_0.4_2_2_8, -9.9_8_6_8, -9.7_1_3_2], [-1_1.0_4_0_5, -1_1.0_2_2_1, -1_0.7_3_1_8]],
[[-3.3_0_8_9, -2.8_5_3_9, -2.6_7_4_0], [-3.2_7_0_6, -2.5_6_2_1, -2.5_1_0_8], [-3.2_5_3_4, -2.6_6_1_5, -2.6_6_5_1]],
] , device=UpperCamelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
@slow
def __A ( self : Tuple ):
lowerCAmelCase_ : Optional[int] =MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
lowerCAmelCase_ : str =model.to(UpperCamelCase_ )
lowerCAmelCase_ : int =MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
lowerCAmelCase_ : Union[str, Any] =prepare_img()
lowerCAmelCase_ : str =image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase_ : Optional[int] =model(**UpperCamelCase_ )
lowerCAmelCase_ : str =outputs.logits.detach().cpu()
lowerCAmelCase_ : Any =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_ , target_sizes=[(50, 60)] )
lowerCAmelCase_ : Optional[int] =torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , UpperCamelCase_ )
lowerCAmelCase_ : Tuple =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_ )
lowerCAmelCase_ : List[str] =torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , UpperCamelCase_ )
| 305 | 1 |
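Every shape assertion in the row above follows from one invariant: MobileViT halves the spatial resolution at each of its five stages, so the final feature map is image_size // output_stride on a side. A minimal sketch of that arithmetic (assumes only that transformers is installed; the values shown are the MobileViTConfig defaults):

from transformers import MobileViTConfig

config = MobileViTConfig()  # image_size=256, output_stride=32 by default
divisor = 2
for stage in range(5):  # five hidden states, as asserted in the test above
    print(f"stage {stage}: {config.image_size // divisor} x {config.image_size // divisor}")
    divisor *= 2
assert config.output_stride == divisor // 2  # the same check the test performs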
# Project Euler 23: find the sum of all positive integers that cannot be
# written as the sum of two abundant numbers (28123 is the known upper bound).
def solution(limit: int = 2_8123) -> int:
    # Divisor-sum sieve: sum_divs[n] ends up holding the sum of the proper
    # divisors of n (every n >= 1 starts with divisor 1).
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a) in abundants for a in abundants):
            res += n
    return res
if __name__ == "__main__":
    print(solution()) | 67 |
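A quick sanity check of the divisor-sum sieve above on a small limit (standalone snippet; the names mirror the row but are not part of the dataset):

limit = 20
sum_divs = [1] * (limit + 1)
for i in range(2, int(limit**0.5) + 1):
    sum_divs[i * i] += i
    for k in range(i + 1, limit // i + 1):
        sum_divs[k * i] += k + i
print([n for n in range(2, limit + 1) if sum_divs[n] > n])  # [12, 18, 20], the abundant numbers up to 20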
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[str] = logging.get_logger(__name__)
def snake_case_ ( lowerCAmelCase_ : str ):
__lowercase : List[str] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
__lowercase : Tuple = 192
__lowercase : List[Any] = 768
__lowercase : Tuple = 12
__lowercase : List[Any] = 3
__lowercase : str = [800, 1333]
__lowercase : List[Any] = False
elif yolos_name == "yolos_s_dWr":
__lowercase : Any = 330
__lowercase : int = 14
__lowercase : List[str] = 6
__lowercase : Tuple = 1320
elif "yolos_s" in yolos_name:
__lowercase : int = 384
__lowercase : Union[str, Any] = 1536
__lowercase : List[str] = 12
__lowercase : Optional[Any] = 6
elif "yolos_b" in yolos_name:
__lowercase : List[Any] = [800, 1344]
__lowercase : Tuple = 91
__lowercase : Union[str, Any] = """huggingface/label-files"""
__lowercase : Any = """coco-detection-id2label.json"""
__lowercase : Dict = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) , """r""" ) )
__lowercase : Union[str, Any] = {int(k ): v for k, v in idalabel.items()}
__lowercase : Union[str, Any] = idalabel
__lowercase : List[str] = {v: k for k, v in idalabel.items()}
return config
def snake_case_ ( lowerCAmelCase_ : dict , lowerCAmelCase_ : YolosConfig , lowerCAmelCase_ : bool = False ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowercase : int = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
__lowercase : str = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
__lowercase : Any = in_proj_weight[: config.hidden_size, :]
__lowercase : Tuple = in_proj_bias[: config.hidden_size]
__lowercase : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowercase : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowercase : Dict = in_proj_weight[-config.hidden_size :, :]
__lowercase : Optional[Any] = in_proj_bias[-config.hidden_size :]
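# Layout note for the slicing above (illustrative, hidden_size = h): the fused
# timm matrix has shape (3*h, h); rows [0:h] are the query weights, rows [h:2h]
# the key weights, and rows [2h:3h] the value weights, i.e. q/k/v order.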
def snake_case_ ( lowerCAmelCase_ : str ):
if "backbone" in name:
__lowercase : Union[str, Any] = name.replace("""backbone""" , """vit""" )
if "cls_token" in name:
__lowercase : Dict = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "det_token" in name:
__lowercase : str = name.replace("""det_token""" , """embeddings.detection_tokens""" )
if "mid_pos_embed" in name:
__lowercase : Dict = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" )
if "pos_embed" in name:
__lowercase : str = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__lowercase : Optional[Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "blocks" in name:
__lowercase : List[str] = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
__lowercase : Optional[int] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__lowercase : Optional[int] = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__lowercase : Dict = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__lowercase : List[Any] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__lowercase : List[str] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__lowercase : str = name.replace("""mlp.fc2""" , """output.dense""" )
if "class_embed" in name:
__lowercase : Optional[Any] = name.replace("""class_embed""" , """class_labels_classifier""" )
if "bbox_embed" in name:
__lowercase : str = name.replace("""bbox_embed""" , """bbox_predictor""" )
if "vit.norm" in name:
__lowercase : List[str] = name.replace("""vit.norm""" , """vit.layernorm""" )
return name
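# Example of the renaming above on one timm-style key (illustrative key only):
#   "blocks.0.attn.proj.weight" -> "encoder.layer.0.attention.output.dense.weight"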
def snake_case_ ( lowerCAmelCase_ : dict , lowerCAmelCase_ : YolosForObjectDetection ):
for key in orig_state_dict.copy().keys():
__lowercase : Optional[int] = orig_state_dict.pop(key )
if "qkv" in key:
__lowercase : int = key.split(""".""" )
__lowercase : List[str] = int(key_split[2] )
__lowercase : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
__lowercase : Dict = val[:dim, :]
__lowercase : Union[str, Any] = val[
dim : dim * 2, :
]
__lowercase : Union[str, Any] = val[-dim:, :]
else:
__lowercase : str = val[:dim]
__lowercase : List[str] = val[dim : dim * 2]
__lowercase : Any = val[-dim:]
else:
__lowercase : List[str] = val
return orig_state_dict
def snake_case_ ( ):
__lowercase : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowercase : List[str] = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : bool = False ):
__lowercase : Optional[int] = get_yolos_config(lowerCAmelCase_ )
# load original state_dict
__lowercase : Any = torch.load(lowerCAmelCase_ , map_location="""cpu""" )["""model"""]
# load 🤗 model
__lowercase : Union[str, Any] = YolosForObjectDetection(lowerCAmelCase_ )
model.eval()
__lowercase : str = convert_state_dict(lowerCAmelCase_ , lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image, prepared by YolosImageProcessor
__lowercase : str = 800 if yolos_name != """yolos_ti""" else 512
__lowercase : Dict = YolosImageProcessor(format="""coco_detection""" , size=lowerCAmelCase_ )
__lowercase : Dict = image_processor(images=prepare_img() , return_tensors="""pt""" )
__lowercase : Optional[int] = model(**lowerCAmelCase_ )
__lowercase , __lowercase : Tuple = outputs.logits, outputs.pred_boxes
__lowercase , __lowercase : Optional[Any] = None, None
if yolos_name == "yolos_ti":
__lowercase : Any = torch.tensor(
[[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] )
__lowercase : Dict = torch.tensor(
[[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] )
elif yolos_name == "yolos_s_200_pre":
__lowercase : List[Any] = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] )
__lowercase : Dict = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] )
elif yolos_name == "yolos_s_300_pre":
__lowercase : List[str] = torch.tensor(
[[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] )
__lowercase : List[str] = torch.tensor(
[[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] )
elif yolos_name == "yolos_s_dWr":
__lowercase : str = torch.tensor(
[[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] )
__lowercase : List[str] = torch.tensor(
[[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] )
elif yolos_name == "yolos_base":
__lowercase : List[Any] = torch.tensor(
[[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] )
__lowercase : Optional[Any] = torch.tensor(
[[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , lowerCAmelCase_ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , lowerCAmelCase_ , atol=1e-4 )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase_ )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
__lowercase : Any = {
"""yolos_ti""": """yolos-tiny""",
"""yolos_s_200_pre""": """yolos-small""",
"""yolos_s_300_pre""": """yolos-small-300""",
"""yolos_s_dWr""": """yolos-small-dwr""",
"""yolos_base""": """yolos-base""",
}
print("""Pushing to the hub...""" )
__lowercase : List[str] = model_mapping[yolos_name]
image_processor.push_to_hub(lowerCAmelCase_ , organization="""hustvl""" )
model.push_to_hub(lowerCAmelCase_ , organization="""hustvl""" )
if __name__ == "__main__":
lowerCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase : Optional[Any] = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 149 | 0 |
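A typical invocation of the conversion script above might look like the following (the script filename and checkpoint path are placeholders, not taken from the row itself):

python convert_yolos_to_pytorch.py \
    --yolos_name yolos_s_200_pre \
    --checkpoint_path /path/to/yolos_s_200_pre.pth \
    --pytorch_dump_folder_path ./yolos-small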
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class A_ ( __a ):
_A :int = 0
_A :bool = False
_A :float = 3.0
class A_ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"""a""": 2} )
self.assertDictEqual(MockClass(a=2 , b=snake_case__ ).to_kwargs() , {"""a""": 2, """b""": True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"""a""": 2, """c""": 2.25} )
@require_cuda
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
lowercase = GradScalerKwargs(init_scale=10_24 , growth_factor=2 )
AcceleratorState._reset_state()
lowercase = Accelerator(mixed_precision="""fp16""" , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
lowercase = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 20_00 )
self.assertEqual(scaler._enabled , snake_case__ )
@require_multi_gpu
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowercase = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(snake_case__ , env=os.environ.copy() )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict =DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
__SCREAMING_SNAKE_CASE : List[str] =Accelerator(kwargs_handlers=[ddp_scaler])
__SCREAMING_SNAKE_CASE : int =torch.nn.Linear(100, 200)
__SCREAMING_SNAKE_CASE : Optional[Any] =accelerator.prepare(model)
# Check the values changed in kwargs
__SCREAMING_SNAKE_CASE : Optional[Any] =''''''
__SCREAMING_SNAKE_CASE : int =model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 72 |
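The to_kwargs contract exercised in the row above is simply: emit only the fields that differ from their dataclass defaults. A minimal sketch (assumes accelerate is installed; the handler class here is hypothetical):

from dataclasses import dataclass
from accelerate.utils import KwargsHandler

@dataclass
class MockHandler(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0

print(MockHandler().to_kwargs())             # {}
print(MockHandler(a=2, b=True).to_kwargs())  # {'a': 2, 'b': True}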
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple ={
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class A_ ( __a ):
_A :List[str] = '''pix2struct_text_model'''
_A :int = ['''past_key_values''']
_A :Optional[Any] = {
'''hidden_size''': '''hidden_size''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : int , snake_case__ : str=5_02_44 , snake_case__ : Dict=7_68 , snake_case__ : Optional[Any]=64 , snake_case__ : Union[str, Any]=20_48 , snake_case__ : Union[str, Any]=12 , snake_case__ : str=12 , snake_case__ : int=32 , snake_case__ : List[Any]=1_28 , snake_case__ : Optional[int]=0.1 , snake_case__ : int=1E-6 , snake_case__ : int=1.0 , snake_case__ : Dict="gelu_new" , snake_case__ : Union[str, Any]=0 , snake_case__ : str=False , snake_case__ : List[str]=0 , snake_case__ : str=1 , snake_case__ : Optional[Any]=False , snake_case__ : Tuple=True , **snake_case__ : List[str] , ):
lowercase = vocab_size
lowercase = hidden_size
lowercase = d_kv
lowercase = d_ff
lowercase = num_layers
lowercase = num_heads
lowercase = relative_attention_num_buckets
lowercase = relative_attention_max_distance
lowercase = dropout_rate
lowercase = layer_norm_epsilon
lowercase = initializer_factor
lowercase = use_cache
lowercase = eos_token_id
lowercase = decoder_start_token_id
# for backwards compatibility
lowercase = dense_act_fn
super().__init__(
pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[int] , snake_case__ : Union[str, os.PathLike] , **snake_case__ : int ):
cls._set_token_in_kwargs(snake_case__ )
lowercase , lowercase = cls.get_config_dict(snake_case__ , **snake_case__ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
lowercase = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
class A_ ( __a ):
_A :Optional[int] = '''pix2struct_vision_model'''
def __init__( self : Tuple , snake_case__ : Union[str, Any]=7_68 , snake_case__ : Any=7_68 , snake_case__ : Dict=20_48 , snake_case__ : int=64 , snake_case__ : str=12 , snake_case__ : Optional[int]=12 , snake_case__ : Union[str, Any]="gelu_new" , snake_case__ : Union[str, Any]=1E-6 , snake_case__ : int=0.0 , snake_case__ : Tuple=0.0 , snake_case__ : Optional[int]=1E-10 , snake_case__ : Optional[int]=1.0 , snake_case__ : Optional[Any]=40_96 , snake_case__ : Optional[int]=32 , snake_case__ : List[Any]=1_28 , **snake_case__ : Union[str, Any] , ):
super().__init__(**snake_case__ )
lowercase = hidden_size
lowercase = patch_embed_hidden_size
lowercase = d_ff
lowercase = dropout_rate
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = initializer_range
lowercase = initializer_factor
lowercase = attention_dropout
lowercase = layer_norm_eps
lowercase = dense_act_fn
lowercase = seq_len
lowercase = relative_attention_num_buckets
lowercase = relative_attention_max_distance
lowercase = d_kv
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[Any] , snake_case__ : Union[str, os.PathLike] , **snake_case__ : int ):
cls._set_token_in_kwargs(snake_case__ )
lowercase , lowercase = cls.get_config_dict(snake_case__ , **snake_case__ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
lowercase = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
class A_ ( __a ):
_A :int = '''pix2struct'''
_A :str = True
def __init__( self : Optional[int] , snake_case__ : List[str]=None , snake_case__ : Optional[Any]=None , snake_case__ : List[Any]=1.0 , snake_case__ : Any=0.02 , snake_case__ : Tuple=False , snake_case__ : Union[str, Any]=False , snake_case__ : Tuple=True , **snake_case__ : int , ):
super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ )
if text_config is None:
lowercase = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
lowercase = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
lowercase = PixaStructTextConfig(**snake_case__ )
lowercase = PixaStructVisionConfig(**snake_case__ )
lowercase = self.text_config.decoder_start_token_id
lowercase = self.text_config.pad_token_id
lowercase = self.text_config.eos_token_id
lowercase = initializer_factor
lowercase = initializer_range
lowercase = self.initializer_range
lowercase = self.initializer_range
lowercase = is_vqa
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple , snake_case__ : PixaStructTextConfig , snake_case__ : PixaStructVisionConfig , **snake_case__ : Any ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowercase = copy.deepcopy(self.__dict__ )
lowercase = self.text_config.to_dict()
lowercase = self.vision_config.to_dict()
lowercase = self.__class__.model_type
return output
| 72 | 1 |
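The composite config in the row above can also be built from the two sub-configs directly. A short sketch of that path (assumes a transformers release that ships Pix2Struct; the layer counts are arbitrary):

from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig(num_layers=2)
vision_config = Pix2StructVisionConfig(num_hidden_layers=2)
config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
print(config.text_config.num_layers)   # 2
print(config.to_dict()["model_type"])  # "pix2struct"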