| code (string, 82–53.2k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    """simple docstring"""
    t = int(t)
    h, m, s = t // 3_6_0_0, (t // 6_0) % 6_0, t % 6_0
    return f"""{h}:{m:02d}:{s:02d}""" if h != 0 else f"""{m:02d}:{s:02d}"""
def html_progress_bar(value, total, prefix, label, width=3_0_0):
    """simple docstring"""
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    """simple docstring"""
    html_code = '<table border="1" class="dataframe">\n'
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"""    <th>{i}</th>\n"""
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"""{elt:.6f}""" if isinstance(elt, float) else str(elt)
            html_code += f"""      <td>{elt}</td>\n"""
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class NotebookProgressBar:
    """simple docstring"""

    warmup = 5
    update_every = 0.2
    def __init__(self, total: int, prefix: Optional[str] = None, leave: bool = True, parent: Optional["NotebookTrainingTracker"] = None, width: int = 300, ):
        '''simple docstring'''
        self.total = total
        self.prefix = '' if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None

    def update(self, value: int, force_update: bool = False, comment: str = None):
        '''simple docstring'''
        self.value = value
        if comment is not None:
            self.comment = comment
        if self.last_value is None:
            self.start_time = self.last_time = time.time()
            self.start_value = self.last_value = value
            self.elapsed_time = self.predicted_remaining = None
            self.first_calls = self.warmup
            self.wait_for = 1
            self.update_bar(value)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            current_time = time.time()
            self.elapsed_time = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
            if value > self.start_value:
                self.average_time_per_item = self.elapsed_time / (value - self.start_value)
            else:
                self.average_time_per_item = None
            if value >= self.total:
                value = self.total
                self.predicted_remaining = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                self.predicted_remaining = self.average_time_per_item * (self.total - value)
            self.update_bar(value)
            self.last_value = value
            self.last_time = current_time
            if self.average_time_per_item is None:
                self.wait_for = 1
            else:
                self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)

    def update_bar(self, value: int, comment=None):
        '''simple docstring'''
        spaced_value = ' ' * (len(str(self.total)) - len(str(value))) + str(value)
        if self.elapsed_time is None:
            self.label = f"""[{spaced_value}/{self.total} : < :"""
        elif self.predicted_remaining is None:
            self.label = f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"""
        else:
            self.label = (
                f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"""
                f""" {format_time(self.predicted_remaining)}"""
            )
            self.label += f""", {1/self.average_time_per_item:.2f} it/s"""
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f""", {self.comment}]"""
        self.display()

    def display(self):
        '''simple docstring'''
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def close(self):
        '''simple docstring'''
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(''))
class NotebookTrainingTracker(NotebookProgressBar):
"""simple docstring"""
    def __init__(self, num_steps, column_names=None):
        '''simple docstring'''
        super().__init__(num_steps)
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        '''simple docstring'''
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        '''simple docstring'''
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(key)
                self.inner_table[0] = columns
            self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        '''simple docstring'''
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        '''simple docstring'''
        self.child_bar = None
        self.display()
class NotebookProgressCallback(TrainerCallback):
"""simple docstring"""
    def __init__(self):
        '''simple docstring'''
        self.training_tracker = None
        self.prediction_bar = None
        self._force_next_update = False

    def on_train_begin(self, args, state, control, **kwargs):
        '''simple docstring'''
        self.first_column = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 'Step'
        self.training_loss = 0
        self.last_log = 0
        column_names = [self.first_column] + ['Training Loss']
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append('Validation Loss')
        self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)

    def on_step_end(self, args, state, control, **kwargs):
        '''simple docstring'''
        epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"""{state.epoch:.2f}"""
        self.training_tracker.update(
            state.global_step + 1, comment=f"""Epoch {epoch}/{state.num_train_epochs}""", force_update=self._force_next_update, )
        self._force_next_update = False

    def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
        '''simple docstring'''
        if not has_length(eval_dataloader):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
            else:
                self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)

    def on_predict(self, args, state, control, **kwargs):
        '''simple docstring'''
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        self.prediction_bar = None

    def on_log(self, args, state, control, logs=None, **kwargs):
        '''simple docstring'''
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            values = {'Training Loss': logs['loss']}
            # First column is necessarily Step since we're not in epoch eval strategy
            values['Step'] = state.global_step
            self.training_tracker.write_line(values)

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        '''simple docstring'''
        if self.training_tracker is not None:
            values = {'Training Loss': 'No log', 'Validation Loss': 'No log'}
            for log in reversed(state.log_history):
                if "loss" in log:
                    values['Training Loss'] = log['loss']
                    break
            if self.first_column == "Epoch":
                values['Epoch'] = int(state.epoch)
            else:
                values['Step'] = state.global_step
            metric_key_prefix = 'eval'
            for k in metrics:
                if k.endswith('_loss'):
                    metric_key_prefix = re.sub(r'\_loss$', '', k)
            _ = metrics.pop('total_flos', None)
            _ = metrics.pop('epoch', None)
            _ = metrics.pop(f"""{metric_key_prefix}_runtime""", None)
            _ = metrics.pop(f"""{metric_key_prefix}_samples_per_second""", None)
            _ = metrics.pop(f"""{metric_key_prefix}_steps_per_second""", None)
            _ = metrics.pop(f"""{metric_key_prefix}_jit_compilation_time""", None)
            for k, v in metrics.items():
                if k == f"""{metric_key_prefix}_loss""":
                    values['Validation Loss'] = v
                else:
                    splits = k.split('_')
                    name = ' '.join([part.capitalize() for part in splits[1:]])
                    values[name] = v
            self.training_tracker.write_line(values)
            self.training_tracker.remove_child()
            self.prediction_bar = None
            # Evaluation takes a long time so we should force the next update.
            self._force_next_update = True

    def on_train_end(self, args, state, control, **kwargs):
        '''simple docstring'''
        self.training_tracker.update(
            state.global_step, comment=f"""Epoch {int(state.epoch)}/{state.num_train_epochs}""", force_update=True)
        self.training_tracker = None
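# --- Usage sketch (editor's addition, not part of the original module) ---
# Driving the bar directly; in a Jupyter notebook this renders and updates a
# single HTML widget rather than printing new lines.
if __name__ == "__main__":
    demo_bar = NotebookProgressBar(100, prefix='Demo')
    for step in range(1, 101):
        time.sleep(0.01)
        demo_bar.update(step, comment=f'step {step}')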
| 229 |
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
__lowerCAmelCase = get_logger(__name__)
class MockDownloadManager:
    """simple docstring"""

    dummy_file_name = 'dummy_data'
    datasets_scripts_dir = 'datasets'
    is_streaming = False
    def __init__(self, dataset_name: str, config: str, version: Union[Version, str], cache_dir: Optional[str] = None, use_local_dummy_data: bool = False, load_existing_dummy_data: bool = True, download_callbacks: Optional[List[Callable]] = None, ):
        '''simple docstring'''
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        '''simple docstring'''
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        '''simple docstring'''
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join('dummy', self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join('dummy', self.version_name)

    @property
    def dummy_zip_file(self):
        '''simple docstring'''
        return os.path.join(self.dummy_data_folder, 'dummy_data.zip')

    def download_dummy_data(self):
        '''simple docstring'''
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True)
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        '''simple docstring'''
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        '''simple docstring'''
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, '/'))
        return self._bucket_url

    @property
    def manual_dir(self):
        '''simple docstring'''
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, '/').split('/')[:-1])
    def download_and_extract(self, data_url, *args):
        '''simple docstring'''
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        '''simple docstring'''
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        '''simple docstring'''
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        '''simple docstring'''
        return path

    def get_recorded_sizes_checksums(self):
        '''simple docstring'''
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        '''simple docstring'''
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        '''simple docstring'''
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}', url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed') for url in data_url)
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split('/')[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        '''simple docstring'''
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split('/')[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files(self):
        '''simple docstring'''
        pass

    def manage_extracted_files(self):
        '''simple docstring'''
        pass
    def iter_archive(self, path: str):
        '''simple docstring'''
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob('*')
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith(('.', '__')):
                yield file_path.relative_to(path).as_posix(), file_path.open('rb')
    def iter_files(self, paths):
        '''simple docstring'''
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith(('.', '__')):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith(('.', '__')):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith(('.', '__')):
                            continue
                        yield os.path.join(dirpath, filename)
| 229 | 1 |
"""simple docstring"""
def solution(max_base: int = 1_0, max_power: int = 2_2) -> int:
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power)
if __name__ == "__main__":
print(f'''{solution(1_0, 2_2) = }''')
| 708 |
"""simple docstring"""
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key):
        '''simple docstring'''
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters(self, letter: str) -> int:
        '''simple docstring'''
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        '''simple docstring'''
        return self.key_string[round(num)]
    def check_determinant(self) -> None:
        '''simple docstring'''
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f'determinant modular {req_l} of encryption key({det}) '
                f'is not co prime w.r.t {req_l}.\nTry another key.'
            )
            raise ValueError(msg)
    def process_text(self, text: str) -> str:
        '''simple docstring'''
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)
    def encrypt(self, text: str) -> str:
        '''simple docstring'''
        text = self.process_text(text.upper())
        encrypted = ''
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = ''.join(
                self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted
    def make_decrypt_key(self):
        '''simple docstring'''
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))
    def decrypt(self, text: str) -> str:
        '''simple docstring'''
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ''
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = ''.join(
                self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
def main() -> None:
    n = int(input('Enter the order of the encryption key: '))
    hill_matrix = []

    print('Enter each row of the encryption key with space separated integers')
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print('Would you like to encrypt or decrypt some text? (1 or 2)')
    option = input('\n1. Encrypt\n2. Decrypt\n')
    if option == "1":
        text_e = input('What text would you like to encrypt?: ')
        print('Your encrypted text is:')
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input('What text would you like to decrypt?: ')
        print('Your decrypted text is:')
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
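# --- Usage sketch (editor's addition) ---
# Illustrative only: any integer key matrix whose determinant is coprime with
# 36 passes check_determinant(); the exact ciphertext depends on the key.
def _demo_hill_cipher() -> None:
    hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
    ciphertext = hc.encrypt('testing hill cipher')
    print(ciphertext)
    print(hc.decrypt(ciphertext))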
| 293 | 0 |
'''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = '.'

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = '\n'.join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
| 603 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple('covid_data', 'cases deaths recovered')


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    '''simple docstring'''
    xpath_str = """//div[@class = \"maincounter-number\"]/span/text()"""
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 603 | 1 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int):
    '''simple docstring'''
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    '''simple docstring'''
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f'The solutions are: {solution_1} and {solution_2}')
if __name__ == "__main__":
main()
| 601 |
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float, ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 601 | 1 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1, lon1, lat2, lon2):
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_demonimator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_demonimator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 450 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ['bart.large', 'bart.large.mnli', 'bart.large.cnn', 'bart_xsum/model.pt']
extra_arch = {'bart.large': BartModel, 'bart.large.mnli': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('0.9.0'):
    raise Exception('requires fairseq >= 0.9.0')


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = ' Hello world! cécé herlolip'

mnli_rename_keys = [
    ('model.classification_heads.mnli.dense.weight', 'classification_head.dense.weight'),
    ('model.classification_heads.mnli.dense.bias', 'classification_head.dense.bias'),
    ('model.classification_heads.mnli.out_proj.weight', 'classification_head.out_proj.weight'),
    ('model.classification_heads.mnli.out_proj.bias', 'classification_head.out_proj.bias'),
]


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location='cpu')
    hub_interface = torch.hub.load('pytorch/fairseq', 'bart.large.cnn').eval()
    hub_interface.model.load_state_dict(sd['model'])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load('pytorch/fairseq', checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace('.', '-')
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors='pt').unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}')

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict['model.shared.weight'] = state_dict['model.decoder.embed_tokens.weight']
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict('mnli', tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, 'lm_head'):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}')
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError('Some values in `fairseq_output` are different from `new_model_outputs`')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config', default=None, type=str, help='Which huggingface architecture to use: bart-large-xsum'
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 287 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'encoder-decoder'
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
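# --- Usage sketch (editor's addition) ---
# Mirrors the class above through the released transformers API; assumes the
# `transformers` package (with BertConfig) is installed.
if __name__ == "__main__":
    from transformers import BertConfig, EncoderDecoderConfig

    enc_dec_cfg = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
    print(enc_dec_cfg.to_dict()['model_type'])  # "encoder-decoder"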
| 707 |
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)


class _PatchedModuleObj:
    """simple docstring"""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """simple docstring"""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
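# --- Usage sketch (editor's addition) ---
# Patching `os.path.join` on a module-like object; SimpleNamespace stands in
# for a module that did `import os` at its top level.
if __name__ == "__main__":
    import os
    from types import SimpleNamespace

    mod = SimpleNamespace(os=os)
    with patch_submodule(mod, 'os.path.join', lambda *p: '/'.join(p)):
        assert mod.os.path.join('a', 'b') == 'a/b'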
| 592 | 0 |
'''simple docstring'''
from itertools import count
def solution(min_block_length: int = 5_0) -> int:
    """simple docstring"""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_0_0_0_0_0_0:
            break
    return n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 161 |
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    """simple docstring"""
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError('''Not supported''')
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """simple docstring"""
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
def _lowerCAmelCase ( lowercase : Sequence[int] , lowercase : Sequence[int] , lowercase : Sequence[int] , lowercase : Optional[Sequence[bool]] = None , lowercase : Optional[Sequence[bool]] = None , ) ->List[Tuple[slice, ...]]:
"""simple docstring"""
def reduce_edge_list(lowercase : List[bool] ) -> None:
lowercase__ = True
for i in range(len(lowercase ) ):
lowercase__ = -1 * (i + 1)
l[reversed_idx] &= tally
lowercase__ = l[reversed_idx]
if start_edges is None:
lowercase__ = [s == 0 for s in start]
reduce_edge_list(lowercase )
if end_edges is None:
lowercase__ = [e == (d - 1) for e, d in zip(lowercase , lowercase )]
reduce_edge_list(lowercase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowercase ) == 0:
return [()]
elif len(lowercase ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
lowercase__ = []
lowercase__ = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowercase , lowercase ):
if s == e:
path_list.append(slice(lowercase , s + 1 ) )
else:
break
lowercase__ = tuple(lowercase )
lowercase__ = len(lowercase )
# start == end, and we're done
if divergence_idx == len(lowercase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowercase__ = start[divergence_idx]
return tuple(
path + (slice(lowercase , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowercase__ = end[divergence_idx]
return tuple(
path + (slice(lowercase , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
lowercase__ = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """simple docstring"""
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx, end_idx, batch_dims, )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def _lowerCAmelCase ( lowercase : Callable , lowercase : Dict[str, Any] , lowercase : int , lowercase : int , lowercase : bool = False , lowercase : Any = None , lowercase : bool = False , ) ->Any:
"""simple docstring"""
if not (len(lowercase ) > 0):
raise ValueError('''Must provide at least one input''' )
lowercase__ = [shape[:no_batch_dims] for shape in _fetch_dims(lowercase )]
lowercase__ = tuple([max(lowercase ) for s in zip(*lowercase )] )
def _prep_inputs(lowercase : torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
lowercase__ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
lowercase__ = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
lowercase__ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
lowercase__ = tensor_tree_map(_prep_inputs , lowercase )
lowercase__ = None
if _out is not None:
lowercase__ = tensor_tree_map(lambda lowercase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
lowercase__ = 1
for d in orig_batch_dims:
flat_batch_dim *= d
lowercase__ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowercase : torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
lowercase__ = 0
lowercase__ = prepped_outputs
for _ in range(lowercase ):
# Chunk the input
if not low_mem:
lowercase__ = _select_chunk
else:
lowercase__ = partial(
_chunk_slice , flat_start=lowercase , flat_end=min(lowercase , i + chunk_size ) , no_batch_dims=len(lowercase ) , )
lowercase__ = tensor_tree_map(lowercase , lowercase )
# Run the layer on the chunk
lowercase__ = layer(**lowercase )
# Allocate space for the output
if out is None:
lowercase__ = tensor_tree_map(lambda lowercase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowercase )
# Put the chunk in its pre-allocated space
if isinstance(lowercase , lowercase ):
def assign(lowercase : dict , lowercase : dict ) -> None:
for k, v in da.items():
if isinstance(lowercase , lowercase ):
assign(lowercase , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
lowercase__ = da[k]
assign(lowercase , lowercase )
elif isinstance(lowercase , lowercase ):
for xa, xa in zip(lowercase , lowercase ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
lowercase__ = xa
elif isinstance(lowercase , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
lowercase__ = output_chunk
else:
raise ValueError('''Not supported''' )
i += chunk_size
lowercase__ = tensor_tree_map(lambda lowercase : t.view(orig_batch_dims + t.shape[1:] ) , lowercase )
return out
class __A :
"""simple docstring"""
def __init__( self , _lowerCamelCase = 5_1_2 , )-> Optional[Any]:
lowercase__ = max_chunk_size
lowercase__ = None
lowercase__ = None
def snake_case_( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )-> int:
logging.info('''Tuning chunk size...''' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
lowercase__ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
lowercase__ = [c for c in candidates if c > min_chunk_size]
lowercase__ = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(_lowerCamelCase ) -> bool:
try:
with torch.no_grad():
fn(*_lowerCamelCase , chunk_size=_lowerCamelCase )
return True
except RuntimeError:
return False
lowercase__ = 0
lowercase__ = len(_lowerCamelCase ) - 1
while i > min_viable_chunk_size_index:
lowercase__ = test_chunk_size(candidates[i] )
if not viable:
lowercase__ = (min_viable_chunk_size_index + i) // 2
else:
lowercase__ = i
lowercase__ = (i + len(_lowerCamelCase ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def snake_case_( self , _lowerCamelCase , _lowerCamelCase )-> bool:
lowercase__ = True
for aa, aa in zip(_lowerCamelCase , _lowerCamelCase ):
assert type(_lowerCamelCase ) == type(_lowerCamelCase )
if isinstance(_lowerCamelCase , (list, tuple) ):
consistent &= self._compare_arg_caches(_lowerCamelCase , _lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
lowercase__ = [v for _, v in sorted(aa.items() , key=lambda _lowerCamelCase : x[0] )]
lowercase__ = [v for _, v in sorted(aa.items() , key=lambda _lowerCamelCase : x[0] )]
consistent &= self._compare_arg_caches(_lowerCamelCase , _lowerCamelCase )
else:
consistent &= aa == aa
return consistent
def snake_case_( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )-> int:
lowercase__ = True
lowercase__ = tree_map(lambda _lowerCamelCase : a.shape if isinstance(_lowerCamelCase , torch.Tensor ) else a , _lowerCamelCase , _lowerCamelCase )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(_lowerCamelCase )
lowercase__ = self._compare_arg_caches(self.cached_arg_data , _lowerCamelCase )
else:
# Otherwise, we can reuse the precomputed value
lowercase__ = False
if not consistent:
lowercase__ = self._determine_favorable_chunk_size(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )
lowercase__ = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 161 | 1 |
def and_gate(input_a: int, input_b: int) -> int:
    """simple docstring"""
    return int((input_a, input_b).count(0) == 0)


def test_and_gate() -> None:
    """simple docstring"""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 180 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """simple docstring"""
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 180 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be a square-shaped array but got a "
            f'{rows}x{columns} array:\n{table}'
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError('No LU decomposition exists')
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
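# --- Usage sketch (editor's addition) ---
# Illustrative check that lower @ upper reproduces the input; the example
# matrix is arbitrary but admits an LU factorization without pivoting.
def _demo_lu_decomposition() -> None:
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)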
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 91 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    """simple docstring"""

    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = '''bilinear'''
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1) # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    """simple docstring"""

    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value, )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i, torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(), )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
| 571 | 0 |
"""simple docstring"""
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # To keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError('''factorial() not defined for negative values''')
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
UpperCAmelCase = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
F"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
F"""binary trees and {catalan_number(node_count)} binary search trees."""
)
| 475 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/deit-base-distilled-patch16-224': (
        'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
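# A minimal usage sketch (added for illustration; outside this package the class
# is importable as `from transformers import DeiTConfig`):
# config = DeiTConfig()
# (config.model_type, config.hidden_size, config.image_size)  # ("deit", 768, 224)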
| 475 | 1 |
def solution(limit: int = 1_0_0_0_0_0_0) -> int:
    # Sieve of Eratosthenes over the odd numbers
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    # Euler's totient: phi[n] = n * prod(1 - 1/p) over the primes p dividing n
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F'{solution() = }')
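    # Worked check (added for illustration): for denominators up to 8 there are
    # sum(phi(d) for d in 2..8) = 21 reduced proper fractions.
    assert solution(8) == 21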
| 100 |
'''simple docstring'''
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2_000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2_000)
    def test_complete_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."])
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."])
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)
        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
@slow
    def test_tokenizer_integration(self):
        texts = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
__snake_case = {"input_ids": [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        # `__snake_case` above still holds the expected-encoding dict
        self.tokenizer_integration_test_util(
            expected_encoding=__snake_case, model_name="AI-Sweden/gpt-sw3-126m", sequences=texts)
| 69 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250_002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
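# A minimal usage sketch (added for illustration; outside this package import it
# as `from transformers import ErnieMConfig`):
# config = ErnieMConfig(num_hidden_layers=6)
# (config.model_type, config.num_hidden_layers)  # ("ernie_m", 6)
# `config.num_classes` is an alias for `num_labels` via `attribute_map`.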
| 710 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class RemBertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        # the SentencePiece processor is not picklable; drop it and reload on unpickle
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
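# A minimal usage sketch (added for illustration; loading requires a real
# SentencePiece file, so the ids below are schematic placeholders):
# tokenizer = RemBertTokenizer("sentencepiece.model")
# tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8])
#   -> [cls_id, 5, 6, sep_id, 7, 8, sep_id]
# tokenizer.get_special_tokens_mask([5, 6], [7, 8])
#   -> [1, 0, 0, 1, 0, 0, 1]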
| 90 | 0 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    return compare_versions(torch_version, operation, version)
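# A minimal usage sketch (added for illustration; this module uses a relative
# import, so call these helpers from inside the package):
# compare_versions("packaging", ">=", "20.0")  # True on any recent install
# is_torch_version("<", "99.0")                # True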
| 50 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
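# A minimal usage sketch (added for illustration; outside this package use
# `from transformers import CLIPImageProcessor`; the zero array stands in for a
# real image):
# import numpy as np
# processor = CLIPImageProcessor()
# batch = processor(images=np.zeros((300, 400, 3), dtype="uint8"), return_tensors="np")
# batch["pixel_values"].shape  # (1, 3, 224, 224)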
| 278 | 0 |
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    # depth-first search; vertices are appended after all their descendants
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    # collect every vertex reachable from `vert` in the reversed graph
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    # Kosaraju's algorithm: DFS ordering on the graph, then DFS on the reverse graph
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
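if __name__ == "__main__":
    # Worked example (added for illustration): each test graph above decomposes
    # into its cycles; ordering inside a component depends on traversal order.
    print(strongly_connected_components(test_graph_1))  # e.g. [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # e.g. [[0, 2, 1], [3, 5, 4]]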
| 702 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 623 | 0 |
import math
def sieve(n):
    """Segmented sieve: return all primes not exceeding n using O(sqrt(n)) memory."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
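# Sanity check (added for illustration): the ten primes not exceeding 30.
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]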
| 513 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    # keeps one temporary directory for the whole class, wiped between tests
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
        return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 587 | 0 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)
            result.append(result_img)
        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
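# A minimal usage sketch of `cosine_distance` (added for illustration; despite
# its name it returns the cosine *similarity* of every embedding pair):
# a = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
# b = torch.tensor([[1.0, 0.0]])
# cosine_distance(a, b)  # tensor([[1.], [0.]])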
| 511 |
'''simple docstring'''
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """Liouville function: -1 if `number` has an odd count of prime factors
    (with multiplicity), +1 otherwise."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
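    # Worked examples (added for illustration): 10 = 2 * 5 has an even number of
    # prime factors (lambda = 1); 11 is prime, so the count is odd (lambda = -1).
    assert liouville_lambda(10) == 1
    assert liouville_lambda(11) == -1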
| 511 | 1 |
"""simple docstring"""
def solution(pence: int = 200) -> int:
    """Count the ways `pence` can be formed from British coin denominations."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73_682
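    # Worked check (added for illustration): 5 pence = {5, 2+2+1, 2+1+1+1, 1x5},
    # i.e. four combinations.
    assert solution(5) == 4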
| 77 | """simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)
                self.assertNotEqual(vocab_size, 0)
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)
                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))
                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)
                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )
                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
@slow
    def test_tokenizer_integration(self):
        sequences = [
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
        expected_encoding = {
'input_ids': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="microsoft/speecht5_asr", revision="c5ef64c71905caeccde0e4462ef3f9077224c524", sequences=sequences)
| 425 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 700 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=24_6534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
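# A minimal usage sketch (added for illustration; outside this package import it
# as `from transformers import CTRLConfig`):
# config = CTRLConfig()
# config.hidden_size == config.n_embd  # True, via `attribute_map`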
| 504 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
def lowerCamelCase__ ( self,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = """adapt react readapt apt"""
__lowerCAmelCase = """adapt react readapt apt"""
return input_text, output_text
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = CTRLTokenizer(self.vocab_file,self.merges_file,**self.special_tokens_map )
__lowerCAmelCase = """adapt react readapt apt"""
__lowerCAmelCase = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
__lowerCAmelCase = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokens + [tokenizer.unk_token]
__lowerCAmelCase = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
| 689 |
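In the merges file above, `@@` marks a subword piece that does not end a word, which is why `react` tokenizes to `re@@ a@@ c@@ t`. A small sketch of the inverse (detokenization) step; the helper name is illustrative:

def detokenize_bpe(tokens: list[str]) -> str:
    # A token ending in "@@" glues onto the next piece; one without it ends the word.
    return " ".join(tokens).replace("@@ ", "").strip()

assert detokenize_bpe("adapt re@@ a@@ c@@ t re@@ adapt apt".split()) == "adapt react readapt apt"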
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel ( metaclass=DummyObject ):
    _backends = ["""onnx"""]

    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ["""onnx"""] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ["""onnx"""] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ["""onnx"""] )
| 689 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_conditional_detr'] = ['ConditionalDetrFeatureExtractor']
    _import_structure['image_processing_conditional_detr'] = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_conditional_detr'] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 369 |
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester ( ConfigTester ):
    '''simple docstring'''

    def create_and_test_config_common_properties( self ):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , '''embed_dim''' ) )
        self.parent.assertTrue(hasattr(config , '''num_heads''' ) )
class CvtModelTester :
    '''simple docstring'''

    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , embed_dim=[16, 48, 96] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , stride_kv=[2, 2, 2] , cls_token=[False, False, True] , attention_drop_rate=[0.0, 0.0, 0.0] , initializer_range=0.02 , layer_norm_eps=1E-12 , is_training=True , use_labels=True , num_labels=2 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
"""simple docstring"""
return CvtConfig(
image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = CvtModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        image_size = (self.image_size, self.image_size)
        height , width = image_size[0], image_size[1]
        for i in range(len(self.depth ) ):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )

    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp( self ):
        """simple docstring"""
        self.model_tester = CvtModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CvtConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties( self ):
        """simple docstring"""
        return

    @unittest.skip(reason='''Cvt does not output attentions''' )
    def test_attention_outputs( self ):
        """simple docstring"""
        pass

    @unittest.skip(reason='''Cvt does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        """simple docstring"""
        pass

    @unittest.skip(reason='''Cvt does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
        """simple docstring"""
        pass
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth )
            self.assertEqual(len(hidden_states ) , expected_num_layers )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )

    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_multi_gpu_data_parallel_forward( self ):
        """simple docstring"""
        pass

    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name )
            self.assertIsNotNone(model )


def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest ( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 369 | 1 |
from __future__ import annotations
def find_max(nums: list[int | float] , left: int , right: int ) -> int | float:
    if len(nums ) == 0:
        raise ValueError('find_max() arg is an empty sequence' )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError('list index out of range' )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums , left , mid )  # find max in range[left, mid]
    right_max = find_max(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) | 67 |
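The recursion above splits `[left, right]` at the midpoint and keeps the larger of the two halves' maxima, giving the recurrence T(n) = 2T(n/2) + O(1), i.e. O(n) overall. Quick usage:

nums = [3, -7, 10, 2, 10, -1]
assert find_max(nums, 0, len(nums) - 1) == 10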
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs , ks ):
    """simple docstring"""
    qts = tuple((re.compile(x + "$" ) for x in qs) )
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False


def _replacement_rules(rules ):
    """simple docstring"""

    def replace(key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val

    return replace


def _get_partition_rules():
    """simple docstring"""
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp" , None )),
        (("transformer", "wte", "embedding"), P("mp" , None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , "mp" )),
        (("attention", "out_proj", "kernel"), P("mp" , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , "mp" )),
        (("mlp", "c_fc", "bias"), P("mp" )),
        (("mlp", "c_proj", "kernel"), P("mp" , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict ):
    """simple docstring"""
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
| 55 | 0 |
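`set_partitions` flattens a parameter pytree into tuple key paths, matches each path against the regex rules above, and freezes the result; the assert guarantees every leaf got a rule. A hedged sketch of how it would be called; the parameter tree below is a made-up miniature, not from the source:

# Illustrative only: a tiny fake GPT-Neo-style parameter tree (values are placeholders).
params = {
    "transformer": {
        "wte": {"embedding": 0},
        "ln_f": {"bias": 0, "scale": 0},
    }
}
specs = set_partitions(params)
# specs["transformer"]["wte"]["embedding"] is P("mp", None);
# both ln_f leaves map to None (replicated across devices).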
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def onnx_export( model , model_args : tuple , output_path : Path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ):
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models( model_path : str , output_path : str , opset : int , fp16 : bool = False ):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = '''cuda'''
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
    else:
        device = '''cpu'''
    pipeline = StableDiffusionPipeline.from_pretrained(model_path , torch_dtype=dtype ).to(device )
    output_path = Path(output_path )

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        '''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=True , return_tensors='''pt''' , )
    onnx_export(
        pipeline.text_encoder , model_args=(text_input.input_ids.to(device=device , dtype=torch.int32 )) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={
            '''input_ids''': {0: '''batch''', 1: '''sequence'''},
        } , opset=opset , )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / '''unet''' / '''model.onnx'''
    onnx_export(
        pipeline.unet , model_args=(
            torch.randn(2 , unet_in_channels , unet_sample_size , unet_sample_size ).to(device=device , dtype=dtype ),
            torch.randn(2 ).to(device=device , dtype=dtype ),
            torch.randn(2 , num_tokens , text_hidden_size ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=unet_path , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={
            '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
            '''timestep''': {0: '''batch'''},
            '''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''},
        } , opset=opset , use_external_data_format=True , )
    unet_model_path = str(unet_path.absolute().as_posix() )
    unet_dir = os.path.dirname(unet_model_path )
    unet = onnx.load(unet_model_path )
    # clean up existing tensor files
    shutil.rmtree(unet_dir )
    os.mkdir(unet_dir )
    # collate external tensor files into one
    onnx.save_model(
        unet , unet_model_path , save_as_external_data=True , all_tensors_to_one_file=True , location='''weights.pb''' , convert_attribute=False , )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample , return_dict : vae_encoder.encode(sample , return_dict )[0].sample()
    onnx_export(
        vae_encoder , model_args=(
            torch.randn(1 , vae_in_channels , vae_sample_size , vae_sample_size ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={
            '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
        } , opset=opset , )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , vae_sample_size , vae_sample_size ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
            '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
        } , opset=opset , )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker , model_args=(
                torch.randn(
                    1 , clip_num_channels , clip_image_size , clip_image_size , ).to(device=device , dtype=dtype ),
                torch.randn(1 , vae_sample_size , vae_sample_size , vae_out_channels ).to(device=device , dtype=dtype ),
            ) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={
                '''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
                '''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''},
            } , opset=opset , )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''' )
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''' ) , scheduler=pipeline.scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=safety_checker is not None , )
    onnx_pipeline.save_pretrained(output_path )
    print('''ONNX pipeline saved to''' , output_path )

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path , provider='''CPUExecutionProvider''' )
    print('''ONNX pipeline is loadable''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
| 326 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_DOCS = 'docs/source/en'
REPO_PATH = '.'
def _find_text_in_file( filename , start_prompt , end_prompt ):
    with open(filename , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1

    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowercase__ ='Model|Encoder|Decoder|ForConditionalGeneration'
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
_re_flax_models = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split( identifier ):
    matches = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , identifier )
    return [m.group(0 ) for m in matches]


def _center_text( text , width ):
    text_length = 2 if text == '''✅''' or text == '''❌''' else len(text )
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool )
    fast_tokenizers = collections.defaultdict(bool )
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if attr_name.endswith('''Tokenizer''' ):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith('''TokenizerFast''' ):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-1_3]
        elif _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''''''.join(camel_case_split(attr_name )[:-1] )

    # Let's build that table!
    model_names = list(model_name_to_config.keys() )
    model_names.sort(key=str.lower )
    columns = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support''']
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c ) + 2 for c in columns]
    widths[0] = max([len(name ) for name in model_names] ) + 2

    # Build the table per se
    table = '''|''' + '''|'''.join([_center_text(c , w ) for c, w in zip(columns , widths )] ) + '''|\n'''
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n"
    check = {True: '''✅''', False: '''❌'''}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l , w ) for l, w in zip(line , widths )] ) + "|\n"
    return table
def check_model_table( overwrite=False ):
    current_table , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
        else:
            raise ValueError(
                '''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 326 | 1 |
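`_center_text` treats the check-mark and cross emoji as width-2 cells so the generated Markdown columns line up. Two quick worked examples of the padding arithmetic:

assert _center_text("GPT-2", 9) == "  GPT-2  "   # (9-5)//2 = 2 spaces each side
assert _center_text("✅", 6) == "  ✅  "          # emoji counted as width 2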
"""simple docstring"""
def sylvester(number: int ) -> int:
    '''simple docstring'''
    assert isinstance(number , int ), F"""The input value of [n={number}] is not an integer"""

    if number == 1:
        return 2
    elif number < 1:
        msg = F"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
| 633 |
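Sylvester's sequence satisfies s(n) = s(n-1) * (s(n-1) - 1) + 1 with s(1) = 2, which is exactly what `lower * upper + 1` computes. The first terms double-check the recurrence:

assert [sylvester(n) for n in range(1, 6)] == [2, 3, 7, 43, 1807]
# 1807 = 43 * 42 + 1, and sylvester(6) = 1807 * 1806 + 1 = 3263443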
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_owlvit'''] = ['''OwlViTFeatureExtractor''']
    _import_structure['''image_processing_owlvit'''] = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_owlvit'''] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 633 | 1 |
def greatest_common_divisor( x , y ):
    return x if y == 0 else greatest_common_divisor(y , x % y )


def lcm( x , y ):
    return (x * y) // greatest_common_divisor(x , y )


def solution( n = 2_0 ):
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
if __name__ == "__main__":
print(F'''{solution() = }''')
| 650 |
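`lcm` relies on the identity lcm(x, y) = x * y / gcd(x, y), and `solution` folds it over 1..n; the Project Euler answer for n = 20 is 232792560:

assert lcm(4, 6) == 12          # gcd(4, 6) = 2, so 4 * 6 // 2 = 12
assert solution(10) == 2520     # smallest number divisible by every integer in 1..10
assert solution() == 232792560  # same for 1..20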
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    @register_to_config
    def __init__( self , max_length: int , vocab_size: int , d_model: int , dropout_rate: float , num_layers: int , num_heads: int , d_kv: int , d_ff: int , feed_forward_proj: str , is_decoder: bool = False , ):
        '''simple docstring'''
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size , d_model )

        self.position_encoding = nn.Embedding(max_length , d_model )
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate )

        t5config = T5Config(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = T5Block(t5config )
            self.encoders.append(lyr )

        self.layer_norm = T5LayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )

    def forward( self , encoder_input_tokens , encoder_inputs_mask ):
        '''simple docstring'''
        x = self.token_embedder(encoder_input_tokens )

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )

        x = self.dropout_pre(x )

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )

        for lyr in self.encoders:
            x = lyr(x , extended_attention_mask )[0]
        x = self.layer_norm(x )

        return self.dropout_post(x ), encoder_inputs_mask
| 650 | 1 |
def power(base: int , exponent: int ) -> float:
    '''simple docstring'''
    return base * power(base , (exponent - 1) ) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
print(F"{base} to the power of {exponent} is {result}")
| 124 |
import math
def proth(number: int ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )

    if number < 1:
        msg = F'''Input value of [number={number}] must be > 0'''
        raise ValueError(msg )
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3 , 2 ) ) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(F"ValueError: there is no {number}th Proth number")
continue
print(F"The {number}th Proth number: {value}")
| 124 | 1 |
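A Proth number has the form k * 2**n + 1 with k odd and k < 2**n, so the sequence starts 3, 5, 9, 13, 17, 25, 33, ...; the block/increment bookkeeping above enumerates exactly those values in order. A brute-force cross-check (the helper below is illustrative, not from the source):

def is_proth(m: int) -> bool:
    # m = k * 2**n + 1 with k odd and k < 2**n
    k, n = m - 1, 0
    while k % 2 == 0:
        k //= 2
        n += 1
    return n > 0 and k < 2**n

assert [proth(i) for i in range(1, 8)] == [3, 5, 9, 13, 17, 25, 33]
assert all(is_proth(proth(i)) for i in range(1, 8))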
from collections.abc import Iterable
from typing import Generic, TypeVar
_UpperCAmelCase : Tuple = TypeVar("""_T""")
class lowercase ( Generic[_T] ):
def __init__( self , snake_case = None ):
snake_case_ = list(iterable or [] )
snake_case_ = []
def __len__( self ):
return len(self._stacka ) + len(self._stacka )
def __repr__( self ):
return F'''Queue({tuple(self._stacka[::-1] + self._stacka )})'''
def a ( self , snake_case ):
self._stacka.append(snake_case )
def a ( self ):
snake_case_ = self._stacka.pop
snake_case_ = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError('Queue is empty' )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 108 |
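Each element is pushed once, moved across once, and popped once, so `get` is amortized O(1) even though a single call may drain `_stack1` entirely. Usage:

queue = QueueByTwoStacks([10, 20, 30])
queue.put(40)
assert len(queue) == 4
assert queue.get() == 10   # FIFO order despite the two LIFO stacks
assert queue.get() == 20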
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters( model_a , model_b , did_step , iteration ):
    '''simple docstring'''
    for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is False
            ), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is True
            ), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model( model , input , target , accelerator , do_backward=True ):
    '''simple docstring'''
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def get_training_setup( accelerator , sched=False ):
    '''simple docstring'''
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1E-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        model , opt , sched , dataloader , ddp_model , ddp_opt , ddp_sched = accelerator.prepare(model , opt , sched , dataloader , ddp_model , ddp_opt , ddp_sched )
    else:
        model , ddp_model , dataloader = accelerator.prepare(model , ddp_model , dataloader )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync( accelerator ):
    '''simple docstring'''
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model , ddp_model , True , iteration )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_distributed_sync( accelerator ):
    '''simple docstring'''
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_gradient_accumulation( split_batches=False , dispatch_batches=False ):
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model , ddp_model , dataloader = get_training_setup(accelerator )
    for iteration, batch in enumerate(dataloader ):
        ddp_input , ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator , False )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler( split_batches=False , dispatch_batches=False ):
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model , opt , sched , dataloader , ddp_model , ddp_opt , ddp_sched = get_training_setup(accelerator , True )
    for iteration, batch in enumerate(dataloader ):
        ddp_input , ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model , input , target , accelerator , False )
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader )):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader ))
        if accelerator.num_processes > 1:
            check_model_parameters(model , ddp_model , did_step , iteration )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
    GradientState._reset_state()
def test_dataloader_break():
    '''simple docstring'''
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80 )
    first_dataloader = DataLoader(first_dset , batch_size=16 )
    second_dset = RegressionDataset(length=96 )
    second_dataloader = DataLoader(second_dset , batch_size=16 )
    first_dataloader , second_dataloader = accelerator.prepare(first_dataloader , second_dataloader )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(first_dataloader )
        if iteration < len(first_dataloader ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(second_dataloader )
                    if batch_num < len(second_dataloader ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    '''simple docstring'''
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print('**Test `accumulate` gradient accumulation with dataloader break**' )
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print('**Test NOOP `no_sync` context manager**' )
        test_noop_sync(accelerator )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print('**Test Distributed `no_sync` context manager**' )
        test_distributed_sync(accelerator )
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        '**Test `accumulate` gradient accumulation, ' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
                test_gradient_accumulation(split_batch , dispatch_batches )
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                '**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        '**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch , dispatch_batches )


def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 108 | 1 |
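The tests above exercise `accelerator.accumulate`, which skips the gradient all-reduce (and makes `optimizer.step()` a no-op) until the configured number of micro-batches has been seen. A self-contained sketch of the canonical training-loop shape the tests verify, reusing the same test utilities; details such as the batch size are illustrative:

import torch.nn.functional as F
from torch.optim import AdamW
from torch.utils.data import DataLoader
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel

accelerator = Accelerator(gradient_accumulation_steps=2)
model = RegressionModel()
optimizer = AdamW(model.parameters(), lr=1e-3)
dataloader = DataLoader(RegressionDataset(length=32), batch_size=4)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for batch in dataloader:
    with accelerator.accumulate(model):  # grads sync only every 2nd micro-batch
        loss = F.mse_loss(model(batch["x"]), batch["y"])
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()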
import numpy as np
def power_iteration( input_matrix , vector , error_tol=1E-12 , max_iterations=100 , ):
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    is_complex = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix , input_matrix.conj().T )

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1E12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix , vector )
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h , np.dot(input_matrix , vector ) )

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_ )

    return lambda_, vector
def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    real_vector = np.array([41, 4, 20] )
    complex_input_matrix = real_input_matrix.astype(np.complex128 )
    imag_matrix = np.triu(1J * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20] ).astype(np.complex128 )

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value , eigen_vector = power_iteration(input_matrix , vector )

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values , eigen_vectors = np.linalg.eigh(input_matrix )
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector ) - np.abs(eigen_vector_max ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration() | 39 |
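Power iteration converges to the dominant eigenpair at a rate governed by |lambda_2 / lambda_1|; with the vector normalized each step, the Rayleigh quotient v^T A v used above is the eigenvalue estimate. Direct usage on the same symmetric 3x3 matrix:

a = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
v0 = np.array([41, 4, 20])
eigen_value, eigen_vector = power_iteration(a, v0)
# eigen_value approximates the largest eigenvalue of `a`,
# i.e. np.linalg.eigh(a)[0][-1], to within error_tol.
print(eigen_value)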
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset( datasets.BeamBasedBuilder ):
    """simple docstring"""

    def _info( self ):
        return datasets.DatasetInfo(
            features=datasets.Features({"""content""": datasets.Value("""string""" )} ) , supervised_keys=None , )

    def _split_generators( self , dl_manager , pipeline ):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_dummy_examples()} )]

    def _build_pcollection( self , pipeline , examples ):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples )
class NestedBeamDataset( datasets.BeamBasedBuilder ):
    """simple docstring"""

    def _info( self ):
        return datasets.DatasetInfo(
            features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) , supervised_keys=None , )

    def _split_generators( self , dl_manager , pipeline ):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_nested_examples()} )
        ]

    def _build_pcollection( self , pipeline , examples ):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples )
def get_test_dummy_examples():
    '''simple docstring'''
    return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]


def get_test_nested_examples():
    '''simple docstring'''
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
class UpperCamelCase_ ( __UpperCamelCase ):
"""simple docstring"""
@require_beam
def lowerCamelCase_ ( self ):
__lowerCamelCase = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__lowerCamelCase = DummyBeamDataset(cache_dir=UpperCAmelCase , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(UpperCAmelCase , builder.name , """default""" , """0.0.0""" , f'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
__lowerCamelCase = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , UpperCAmelCase )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , UpperCAmelCase )
self.assertDictEqual(dset["""train"""][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(UpperCAmelCase , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def lowerCamelCase_ ( self ):
import apache_beam as beam
__lowerCamelCase = beam.io.parquetio.WriteToParquet
__lowerCamelCase = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__lowerCamelCase = DummyBeamDataset(cache_dir=UpperCAmelCase , beam_runner="""DirectRunner""" )
with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
__lowerCamelCase = partial(UpperCAmelCase , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
UpperCAmelCase , builder.name , """default""" , """0.0.0""" , f'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
UpperCAmelCase , builder.name , """default""" , """0.0.0""" , f'''{builder.name}-train-00001-of-00002.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
__lowerCamelCase = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , UpperCAmelCase )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , UpperCAmelCase )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["""train"""]["""content"""] ) , sorted(["""foo""", """bar""", """foobar"""] ) )
self.assertTrue(
os.path.exists(os.path.join(UpperCAmelCase , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def lowerCamelCase_ ( self ):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__lowerCamelCase = DummyBeamDataset(cache_dir=UpperCAmelCase )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def lowerCamelCase_ ( self ):
__lowerCamelCase = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
__lowerCamelCase = NestedBeamDataset(cache_dir=UpperCAmelCase , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(UpperCAmelCase , builder.name , """default""" , """0.0.0""" , f'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
__lowerCamelCase = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , UpperCAmelCase )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , UpperCAmelCase )
self.assertDictEqual(dset["""train"""][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(UpperCAmelCase , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
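A usage sketch for the first builder above, assuming `apache_beam` is installed and treating the class as the upstream `DummyBeamDataset`; the renaming above collapses its method names, so this shows the intended API rather than the literal snippet.

import tempfile

with tempfile.TemporaryDirectory() as cache_dir:
    builder = DummyBeamDataset(cache_dir=cache_dir, beam_runner="DirectRunner")
    builder.download_and_prepare()  # runs the Beam pipeline locally
    dset = builder.as_dataset()     # returns a DatasetDict with a "train" split
    print(dset["train"].num_rows)   # 3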
| 479 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json''',
'''google/bigbird-roberta-large''': '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json''',
'''google/bigbird-base-trivia-itc''': '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json''',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : List[str] = "big_bird"
def __init__( self , __A=5_0358 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu_new" , __A=0.1 , __A=0.1 , __A=4096 , __A=2 , __A=0.02 , __A=1e-12 , __A=True , __A=0 , __A=1 , __A=2 , __A=66 , __A="block_sparse" , __A=True , __A=False , __A=64 , __A=3 , __A=None , **__A , ):
"""simple docstring"""
super().__init__(
pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , sep_token_id=__A , **__A , )
lowerCamelCase : str = vocab_size
lowerCamelCase : Dict = max_position_embeddings
lowerCamelCase : Union[str, Any] = hidden_size
lowerCamelCase : Optional[Any] = num_hidden_layers
lowerCamelCase : Tuple = num_attention_heads
lowerCamelCase : List[Any] = intermediate_size
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : Union[str, Any] = hidden_dropout_prob
lowerCamelCase : str = attention_probs_dropout_prob
lowerCamelCase : List[Any] = initializer_range
lowerCamelCase : List[str] = type_vocab_size
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : Optional[int] = use_cache
lowerCamelCase : Optional[Any] = rescale_embeddings
lowerCamelCase : Optional[Any] = attention_type
lowerCamelCase : str = use_bias
lowerCamelCase : List[str] = block_size
lowerCamelCase : Optional[int] = num_random_blocks
lowerCamelCase : Tuple = classifier_dropout
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
@property
def _snake_case ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowerCamelCase : Tuple = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase : Any = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
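A minimal configuration sketch for the class above (values are illustrative; `block_size` and `num_random_blocks` only take effect when `attention_type="block_sparse"`):

from transformers import BigBirdConfig

config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
print(config.attention_type, config.block_size)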
| 231 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = "distilbert"
__A : Tuple = {
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
def __init__( self , __A=3_0522 , __A=512 , __A=False , __A=6 , __A=12 , __A=768 , __A=4 * 768 , __A=0.1 , __A=0.1 , __A="gelu" , __A=0.02 , __A=0.1 , __A=0.2 , __A=0 , **__A , ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = vocab_size
lowerCamelCase : Union[str, Any] = max_position_embeddings
lowerCamelCase : int = sinusoidal_pos_embds
lowerCamelCase : int = n_layers
lowerCamelCase : str = n_heads
lowerCamelCase : Optional[Any] = dim
lowerCamelCase : int = hidden_dim
lowerCamelCase : Optional[int] = dropout
lowerCamelCase : Optional[int] = attention_dropout
lowerCamelCase : List[str] = activation
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : Tuple = qa_dropout
lowerCamelCase : Optional[int] = seq_classif_dropout
super().__init__(**__A , pad_token_id=__A )
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
@property
def _snake_case ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowerCamelCase : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase : Dict = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
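A quick sketch of what the `attribute_map` above buys: the generic configuration names resolve to DistilBERT's own attribute names.

from transformers import DistilBertConfig

config = DistilBertConfig()
assert config.hidden_size == config.dim              # "hidden_size" -> "dim"
assert config.num_hidden_layers == config.n_layers   # "num_hidden_layers" -> "n_layers"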
| 231 | 1 |
'''simple docstring'''
def lowerCamelCase__ ( a__) -> int:
"""simple docstring"""
_snake_case : Tuple = len(a__)
_snake_case : Optional[Any] = len(matrix[0])
_snake_case : Dict = min(a__ , a__)
for row in range(a__):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , a__):
_snake_case : Union[str, Any] = matrix[col][row] / matrix[row][row]
for i in range(a__ , a__):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
_snake_case : Optional[int] = True
for i in range(row + 1 , a__):
if matrix[i][row] != 0:
_snake_case , _snake_case : List[Any] = matrix[i], matrix[row]
_snake_case : Union[str, Any] = False
break
if reduce:
rank -= 1
for i in range(a__):
_snake_case : int = matrix[i][rank]
# Intended to stay on the same row; note that decrementing a `for` loop
# variable does not rewind the iteration (see the reconstruction below)
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
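The renaming above makes the function hard to follow and unbinds several variables, so here is a readable reconstruction of the Gaussian-elimination rank algorithm it corresponds to. This is a sketch, not a verbatim de-obfuscation; because `row -= 1` inside a `for` loop cannot rewind the loop counter, this variant uses an explicit `while`.

def rank_of_matrix(matrix: list[list[float]]) -> int:
    # Computes the rank via Gaussian elimination; mutates its input.
    rows, columns = len(matrix), len(matrix[0])
    rank = min(rows, columns)
    row = 0
    while row < rank:
        if matrix[row][row] != 0:
            # Eliminate every entry below the pivot.
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
            row += 1
        else:
            # Look for a row below with a non-zero entry in this column.
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    break
            else:
                # No pivot available: shrink the rank and pull in the last column.
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
    return rank

assert rank_of_matrix([[1.0, 0.0], [0.0, 0.0]]) == 1
assert rank_of_matrix([[0.0, 1.0], [1.0, 0.0]]) == 2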
| 517 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowercase(_lowercase , _lowercase , unittest.TestCase ):
__snake_case: Optional[int] = IFImgaImgSuperResolutionPipeline
__snake_case: Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
__snake_case: Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
__snake_case: List[Any] = PipelineTesterMixin.required_optional_params - {'latents'}
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def lowercase__ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> Union[str, Any]:
"""simple docstring"""
if str(__SCREAMING_SNAKE_CASE ).startswith('mps' ):
a__ = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
a__ = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
a__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a__ = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
a__ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowercase__ ( self ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowercase__ ( self ) -> Any:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
self._test_save_load_local()
def lowercase__ ( self ) -> Any:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
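The per-device seeding branch in the dummy-input helper above is worth isolating. A minimal sketch (the helper name is an assumption), reflecting that `torch.Generator(device=...)` is not supported on `mps`:

import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # fall back to the global CPU generator
    return torch.Generator(device=device).manual_seed(seed)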
| 273 | 0 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
lowercase : Optional[int] =LEDTokenizer
lowercase : Any =LEDTokenizerFast
lowercase : Any =True
def UpperCamelCase ( self ):
super().setUp()
lowercase_ :List[Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowercase_ :str = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
lowercase_ :List[Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowercase_ :Optional[Any] = {'''unk_token''': '''<unk>'''}
lowercase_ :Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase_ :List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCamelCase_ ) )
def UpperCamelCase ( self , **UpperCamelCase_ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def UpperCamelCase ( self , **UpperCamelCase_ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def UpperCamelCase ( self , UpperCamelCase_ ):
return "lower newer", "lower newer"
@cached_property
def UpperCamelCase ( self ):
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def UpperCamelCase ( self ):
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def UpperCamelCase ( self ):
lowercase_ :List[str] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowercase_ :str = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase_ :List[Any] = tokenizer(UpperCamelCase_ , max_length=len(UpperCamelCase_ ) , padding=UpperCamelCase_ , return_tensors='''pt''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowercase_ :Union[str, Any] = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@require_torch
def UpperCamelCase ( self ):
lowercase_ :Tuple = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase_ :str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors='''pt''' )
self.assertIn('''input_ids''' , UpperCamelCase_ )
self.assertIn('''attention_mask''' , UpperCamelCase_ )
self.assertNotIn('''labels''' , UpperCamelCase_ )
self.assertNotIn('''decoder_attention_mask''' , UpperCamelCase_ )
@require_torch
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase_ :List[str] = tokenizer(text_target=UpperCamelCase_ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def UpperCamelCase ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase_ :Union[str, Any] = tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , return_tensors='''pt''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def UpperCamelCase ( self ):
lowercase_ :Union[str, Any] = ['''A long paragraph for summarization.''']
lowercase_ :Any = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase_ :Tuple = tokenizer(UpperCamelCase_ , return_tensors='''pt''' )
lowercase_ :int = tokenizer(text_target=UpperCamelCase_ , return_tensors='''pt''' )
lowercase_ :Union[str, Any] = inputs['''input_ids''']
lowercase_ :Any = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def UpperCamelCase ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase_ :Union[str, Any] = ['''Summary of the text.''', '''Another summary.''']
lowercase_ :int = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowercase_ :Any = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ )
lowercase_ :Union[str, Any] = [[0] * len(UpperCamelCase_ ) for x in encoded_output['''input_ids''']]
lowercase_ :List[Any] = tokenizer.pad(UpperCamelCase_ )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , UpperCamelCase_ )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowercase_ :Optional[int] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowercase_ :Union[str, Any] = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowercase_ :int = '''A, <mask> AllenNLP sentence.'''
lowercase_ :Any = tokenizer_r.encode_plus(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ )
lowercase_ :Dict = tokenizer_p.encode_plus(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowercase_ :List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowercase_ :str = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
UpperCamelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
UpperCamelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
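A minimal usage sketch for the tokenizer under test (downloads from the Hub on first call); the expected shape follows the nine reference ids asserted above:

from transformers import LEDTokenizerFast

tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
batch = tok(["A long paragraph for summarization."], return_tensors="pt")
print(batch.input_ids.shape)  # torch.Size([1, 9])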
| 441 |
import math
SCREAMING_SNAKE_CASE : List[str] = 10
SCREAMING_SNAKE_CASE : Dict = 7
SCREAMING_SNAKE_CASE : int = BALLS_PER_COLOUR * NUM_COLOURS
def UpperCamelCase ( _a = 2_0 ) -> str:
'''simple docstring'''
lowercase_ :List[str] = math.comb(_a , _a )
lowercase_ :Tuple = math.comb(NUM_BALLS - BALLS_PER_COLOUR , _a )
lowercase_ :Dict = NUM_COLOURS * (1 - missing_colour / total)
return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
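A cross-check sketch via linearity of expectation. Note the renaming above collapses `math.comb(NUM_BALLS, taken)` into `math.comb(taken, taken)`, so this shows the intended value rather than the snippet's literal output.

from fractions import Fraction
from math import comb

# P(a given colour is absent) = C(60, 20) / C(70, 20); with 7 colours,
# E[#distinct colours] = 7 * (1 - P(absent)).
p_absent = Fraction(comb(60, 20), comb(70, 20))
print(f"{float(7 * (1 - p_absent)):.9f}")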
| 441 | 1 |
import math
def UpperCAmelCase ( UpperCAmelCase )-> int:
'''simple docstring'''
if not isinstance(UpperCAmelCase ,UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ = f'''Input value of [number={number}] must be an integer'''
raise TypeError(UpperCAmelCase )
if number < 1:
SCREAMING_SNAKE_CASE_ = f'''Input value of [number={number}] must be > 0'''
raise ValueError(UpperCAmelCase )
elif number == 1:
return 3
elif number == 2:
return 5
else:
SCREAMING_SNAKE_CASE_ = int(math.log(number // 3 ,2 ) ) + 2
SCREAMING_SNAKE_CASE_ = [3, 5]
SCREAMING_SNAKE_CASE_ = 2
SCREAMING_SNAKE_CASE_ = 3
for block in range(1 ,UpperCAmelCase ):
for _ in range(UpperCAmelCase ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
A_ = 0
try:
A_ = proth(number)
except ValueError:
print(F'ValueError: there is no {number}th Proth number')
continue
print(F'The {number}th Proth number: {value}')
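Because the renaming above collapses the `block_index`/`increment` loop bounds into the function argument, the snippet's output can drift from the true sequence. A brute-force reference sketch:

def proth_numbers(count: int) -> list[int]:
    # N = k * 2**n + 1 with odd k < 2**n, collected in ascending order.
    candidates = set()
    for n in range(1, 13):  # bound is generous for small counts (assumption)
        for k in range(1, 2 ** n, 2):
            candidates.add(k * 2 ** n + 1)
    return sorted(candidates)[:count]

assert proth_numbers(8) == [3, 5, 9, 13, 17, 25, 33, 41]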
| 393 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
A_ = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
A_ = logging.WARNING
def UpperCAmelCase ( )-> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = os.getenv('''DATASETS_VERBOSITY''' ,UpperCAmelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
f'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
return _default_log_level
def UpperCAmelCase ( )-> str:
'''simple docstring'''
return __name__.split('''.''' )[0]
def UpperCAmelCase ( )-> logging.Logger:
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def UpperCAmelCase ( )-> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def UpperCAmelCase ( )-> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def UpperCAmelCase ( UpperCAmelCase = None )-> logging.Logger:
'''simple docstring'''
if name is None:
SCREAMING_SNAKE_CASE_ = _get_library_name()
return logging.getLogger(UpperCAmelCase )
def UpperCAmelCase ( )-> int:
'''simple docstring'''
return _get_library_root_logger().getEffectiveLevel()
def UpperCAmelCase ( UpperCAmelCase )-> None:
'''simple docstring'''
_get_library_root_logger().setLevel(UpperCAmelCase )
def UpperCAmelCase ( )-> Optional[Any]:
'''simple docstring'''
return set_verbosity(UpperCAmelCase )
def UpperCAmelCase ( )-> Union[str, Any]:
'''simple docstring'''
return set_verbosity(UpperCAmelCase )
def UpperCAmelCase ( )-> Dict:
'''simple docstring'''
return set_verbosity(UpperCAmelCase )
def UpperCAmelCase ( )-> Optional[int]:
'''simple docstring'''
return set_verbosity(UpperCAmelCase )
def UpperCAmelCase ( )-> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = False
def UpperCAmelCase ( )-> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class snake_case :
'''simple docstring'''
def __init__( self : Tuple , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : str ) -> Optional[int]: # pylint: disable=unused-argument
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = args[0] if args else None
def __iter__( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] ) -> int:
"""simple docstring"""
def empty_fn(*lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : Union[str, Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : str ) -> List[str]:
"""simple docstring"""
return self
def __exit__( self : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict ) -> Dict:
"""simple docstring"""
return
A_ = True
class snake_case :
'''simple docstring'''
def __call__( self : Union[str, Any] , *lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any]=False , **lowerCAmelCase_ : Tuple ) -> Tuple:
"""simple docstring"""
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*lowerCAmelCase_ , **lowerCAmelCase_ )
else:
return EmptyTqdm(*lowerCAmelCase_ , **lowerCAmelCase_ )
def _lowercase ( self : int , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Optional[int] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*lowerCAmelCase_ , **lowerCAmelCase_ )
def _lowercase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
A_ = _tqdm_cls()
def UpperCAmelCase ( )-> bool:
'''simple docstring'''
global _tqdm_active
return bool(_tqdm_active )
def UpperCAmelCase ( )-> List[str]:
'''simple docstring'''
global _tqdm_active
SCREAMING_SNAKE_CASE_ = True
def UpperCAmelCase ( )-> Optional[int]:
'''simple docstring'''
global _tqdm_active
SCREAMING_SNAKE_CASE_ = False
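A usage sketch, assuming this module is the one shipped as `datasets.logging`:

import datasets

datasets.logging.set_verbosity(datasets.logging.INFO)
logger = datasets.logging.get_logger(__name__)
logger.info("effective level: %d", datasets.logging.get_verbosity())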
| 393 | 1 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 707 |
import argparse
import struct
import unittest
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , _snake_case : bytes ):
"""simple docstring"""
A__ = data
# Initialize hash values
A__ = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
# Initialize round constants
A__ = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
A__ = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _a ( _snake_case : bytes ):
"""simple docstring"""
A__ = B'\x80' + (B'\x00' * (63 - (len(_snake_case ) + 8) % 64))
A__ = struct.pack('>Q' , (len(_snake_case ) * 8) )
return data + padding + big_endian_integer
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
A__ = list(struct.unpack('>16L' , _snake_case ) )
# add 48 0-ed integers
words += [0] * 48
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
A__ = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
A__ = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
A__ = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x100000000
# Compression
A__ = self.ror(_snake_case , 6 ) ^ self.ror(_snake_case , 11 ) ^ self.ror(_snake_case , 25 )
A__ = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
A__ = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x100000000
A__ = self.ror(_snake_case , 2 ) ^ self.ror(_snake_case , 13 ) ^ self.ror(_snake_case , 22 )
A__ = (a & b) ^ (a & c) ^ (b & c)
A__ = (sa + maj) % 0x100000000
A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ = (
g,
f,
e,
((d + tempa) % 0x100000000),
c,
b,
a,
((tempa + tempa) % 0x100000000),
)
A__ = [a, b, c, d, e, f, g, h]
# Modify final values
A__ = [
((element + mutated_hash_values[index]) % 0x100000000)
for index, element in enumerate(self.hashes )
]
A__ = ''.join([hex(_snake_case )[2:].zfill(8 ) for value in self.hashes] )
def _a ( self : Dict , _snake_case : int , _snake_case : int ):
"""simple docstring"""
return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : str ):
"""simple docstring"""
import hashlib
A__ = bytes('Test String' , 'utf-8' )
self.assertEqual(SHAaaa(_snake_case ).hash , hashlib.shaaaa(_snake_case ).hexdigest() )
def A ( ) -> None:
import doctest
doctest.testmod()
A__ = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
A__ = parser.parse_args()
A__ = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
A__ = f.read()
else:
A__ = bytes(__UpperCamelCase , 'utf-8' )
print(SHAaaa(__UpperCamelCase ).hash )
if __name__ == "__main__":
main()
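The 32-bit right-rotation at the heart of the compression loop, isolated as a standalone sketch (it behaves the same way as the class method obfuscated above):

def ror(value: int, rotations: int) -> int:
    return 0xFFFFFFFF & ((value << (32 - rotations)) | (value >> rotations))

assert ror(0x00000001, 1) == 0x80000000
assert ror(0x80000000, 31) == 0x00000001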
| 52 | 0 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=99 , lowerCamelCase_=64 , lowerCamelCase_=32 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=37 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=5_12 , lowerCamelCase_=16 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=3 , lowerCamelCase_=4 , lowerCamelCase_=None , ) -> List[Any]:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = embedding_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = intermediate_size
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
lowerCAmelCase__ = MegatronBertModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
lowerCAmelCase__ = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
lowerCAmelCase__ = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[Any]:
lowerCAmelCase__ = MegatronBertForMaskedLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
lowerCAmelCase__ = MegatronBertForCausalLM(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
lowerCAmelCase__ = MegatronBertForNextSentencePrediction(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
lowerCAmelCase__ = MegatronBertForPreTraining(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , next_sentence_label=lowerCamelCase_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Dict:
lowerCAmelCase__ = MegatronBertForQuestionAnswering(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> List[str]:
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = MegatronBertForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = MegatronBertForTokenClassification(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Any:
lowerCAmelCase__ = self.num_choices
lowerCAmelCase__ = MegatronBertForMultipleChoice(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ = model(
lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = self.prepare_config_and_inputs()
( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) = config_and_inputs
lowerCAmelCase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a__ ( a__ , a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Any = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowercase__ : Union[str, Any] = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ : Dict = True
# test_resize_embeddings = False
lowercase__ : Optional[Any] = False
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False ) -> Optional[int]:
lowerCAmelCase__ = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
lowerCAmelCase__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCamelCase_ )
lowerCAmelCase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
return inputs_dict
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = MegatronBertModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self ) -> int:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*lowerCamelCase_ )
def _snake_case ( A ) -> Any:
return torch.tensor(
A , dtype=torch.long , device=A , )
__UpperCAmelCase = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class a__ ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip('''Model is not available.''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = '''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
lowerCAmelCase__ = os.path.join(os.environ['''MYDIR'''] , lowerCamelCase_ )
lowerCAmelCase__ = MegatronBertModel.from_pretrained(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.half()
lowerCAmelCase__ = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
lowerCAmelCase__ = model(lowerCamelCase_ )[0]
lowerCAmelCase__ = torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape , lowerCamelCase_ )
lowerCAmelCase__ = [-0.6_040, -0.2_517, -0.1_025, 0.3_420, -0.6_758, -0.0_017, -0.1_089, -0.1_990, 0.5_728]
for ii in range(3 ):
for jj in range(3 ):
lowerCAmelCase__ = output[0, ii, jj]
lowerCAmelCase__ = expected[3 * ii + jj]
lowerCAmelCase__ = '''ii={} jj={} a={} b={}'''.format(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
self.assertTrue(math.isclose(lowerCamelCase_ , lowerCamelCase_ , rel_tol=lowerCamelCase_ , abs_tol=lowerCamelCase_ ) , msg=lowerCamelCase_ ) | 90 |
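A tiny-configuration sketch for instantiating the model family under test without pretrained weights (sizes are illustrative):

from transformers import MegatronBertConfig, MegatronBertModel

config = MegatronBertConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=128)
model = MegatronBertModel(config)
print(sum(p.numel() for p in model.parameters()))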
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str):
# Initialise PyTorch model
lowerCamelCase : Optional[int] = FunnelConfig.from_json_file(UpperCAmelCase__)
print(F'''Building PyTorch model from configuration: {config}''')
lowerCamelCase : Optional[Any] = FunnelBaseModel(UpperCAmelCase__) if base_model else FunnelModel(UpperCAmelCase__)
# Load weights from tf checkpoint
load_tf_weights_in_funnel(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''')
torch.save(model.state_dict() , UpperCAmelCase__)
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
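An invocation sketch for the script above (the script filename and all paths are placeholders):

#   python convert_funnel_tf_checkpoint.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin \
#       --base_model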
| 320 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase_( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
# 1) Construct the failure array for the pattern
UpperCamelCase__ = get_failure_array(SCREAMING_SNAKE_CASE )
# 2) Step through the text searching for the pattern
UpperCamelCase__ , UpperCamelCase__ = 0, 0 # index into text, pattern
while i < len(SCREAMING_SNAKE_CASE ):
if pattern[j] == text[i]:
if j == (len(SCREAMING_SNAKE_CASE ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
UpperCamelCase__ = failure[j - 1]
continue
i += 1
return False
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> list[int]:
"""simple docstring"""
UpperCamelCase__ = [0]
UpperCamelCase__ = 0
UpperCamelCase__ = 1
while j < len(SCREAMING_SNAKE_CASE ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
UpperCamelCase__ = failure[i - 1]
continue
j += 1
failure.append(SCREAMING_SNAKE_CASE )
return failure
if __name__ == "__main__":
# Test 1)
A__ : Dict= """abc1abc12"""
A__ : int= """alskfjaldsabc1abc1abc12k23adsfabcabc"""
A__ : Any= """alskfjaldsk23adsfabcabc"""
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
A__ : Optional[Any]= """ABABX"""
A__ : Union[str, Any]= """ABABZABABYABABX"""
assert kmp(pattern, text)
# Test 3)
A__ : List[str]= """AAAB"""
A__ : Tuple= """ABAAAAAB"""
assert kmp(pattern, text)
# Test 4)
A__ : Any= """abcdabcy"""
A__ : Dict= """abcxabcdabxabcdabcdabcy"""
assert kmp(pattern, text)
# Test 5)
A__ : str= """aabaabaaa"""
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
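A readable variant sketch that returns the start index of the first match instead of a boolean; the failure array is rebuilt inline because the renaming above unbinds the helper's variables.

def kmp_find(pattern: str, text: str) -> int:
    # Build the failure (longest proper prefix-suffix) array.
    failure = [0]
    i, j = 0, 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    # Scan the text, falling back through the failure array on mismatch.
    i = j = 0
    while i < len(text):
        if pattern[j] == text[i]:
            if j == len(pattern) - 1:
                return i - j  # start index of the match
            j += 1
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return -1

assert kmp_find("ABABX", "ABABZABABYABABX") == 10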
| 20 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 1_00_00_00 , SCREAMING_SNAKE_CASE = 10 ) -> int:
"""simple docstring"""
UpperCamelCase__ = defaultdict(SCREAMING_SNAKE_CASE )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
UpperCamelCase__ = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
UpperCamelCase__ = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(SCREAMING_SNAKE_CASE , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(F"""{solution() = }""")
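A brute-force cross-check sketch of the same count, enumerating square laminae directly. Note the renaming above turns `defaultdict(int)` into `defaultdict(t_limit)`, which is not callable, so this also shows the intended behaviour.

from collections import defaultdict

def brute_force(t_limit: int = 1000, unique_limit: int = 10) -> int:
    counts = defaultdict(int)
    for outer in range(3, t_limit):
        for hole in range(outer - 2, 0, -2):  # hole and outer share parity
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break  # tiles only grow as the hole shrinks
            counts[tiles] += 1
    return sum(1 for n in counts.values() if 1 <= n <= unique_limit)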
| 20 | 1 |
from math import factorial
_lowercase = {str(d): factorial(d) for d in range(10)}
def _A (UpperCamelCase : int ) ->int:
'''simple docstring'''
return sum(DIGIT_FACTORIAL[d] for d in str(UpperCamelCase ) )
def _A () ->int:
'''simple docstring'''
lowerCamelCase__ : Dict = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , UpperCamelCase ) if sum_of_digit_factorial(UpperCamelCase ) == i )
if __name__ == "__main__":
print(F'''{solution() = }''')
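Why `7 * factorial(9) + 1` is a safe search bound: an n-digit number is at least 10**(n-1), while its digit-factorial sum is at most n * 9!, and the former outgrows the latter from eight digits on.

from math import factorial

assert 10 ** 7 > 8 * factorial(9)  # 10_000_000 > 2_903_040, so 8+ digits are impossible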
| 157 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __A ( A_ ):
UpperCamelCase :List[str] = '''gpt_neo'''
UpperCamelCase :Tuple = ['''past_key_values''']
UpperCamelCase :Optional[int] = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__(self , __magic_name__=50257 , __magic_name__=2048 , __magic_name__=2048 , __magic_name__=24 , __magic_name__=[[["global", "local"], 12]] , __magic_name__=16 , __magic_name__=None , __magic_name__=256 , __magic_name__="gelu_new" , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.0 , __magic_name__=0.1 , __magic_name__=1E-5 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=50256 , __magic_name__=50256 , **__magic_name__ , ):
lowerCamelCase__ : Dict = vocab_size
lowerCamelCase__ : Tuple = max_position_embeddings
lowerCamelCase__ : str = hidden_size
lowerCamelCase__ : List[Any] = num_layers
lowerCamelCase__ : List[Any] = num_heads
lowerCamelCase__ : str = intermediate_size
lowerCamelCase__ : str = window_size
lowerCamelCase__ : List[Any] = activation_function
lowerCamelCase__ : Any = resid_dropout
lowerCamelCase__ : Dict = embed_dropout
lowerCamelCase__ : str = attention_dropout
lowerCamelCase__ : str = classifier_dropout
lowerCamelCase__ : str = layer_norm_epsilon
lowerCamelCase__ : Optional[int] = initializer_range
lowerCamelCase__ : int = use_cache
lowerCamelCase__ : List[Any] = bos_token_id
lowerCamelCase__ : int = eos_token_id
lowerCamelCase__ : str = attention_types
lowerCamelCase__ : List[str] = self.expand_attention_types_params(__magic_name__ )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
f"but is `len(config.attention_layers) = {len(self.attention_layers )}`, "
f"`config.num_layers = {self.num_layers}`. "
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ )
@staticmethod
def _snake_case (__magic_name__ ):
lowerCamelCase__ : Optional[Any] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def _A (UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple ) ->int:
'''simple docstring'''
import torch
lowerCamelCase__ : Any = input.size()
lowerCamelCase__ : Tuple = len(UpperCamelCase )
lowerCamelCase__ : str = shape[dimension]
lowerCamelCase__ : Optional[int] = torch.arange(0 , UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : Optional[Any] = torch.div(sizedim - size , UpperCamelCase , rounding_mode="""floor""" ) + 1
lowerCamelCase__ : Tuple = torch.arange(UpperCamelCase ) + low_indices[:min_length][:, None]
lowerCamelCase__ : Dict = [slice(UpperCamelCase )] * rank
lowerCamelCase__ : Union[str, Any] = indices
lowerCamelCase__ : Optional[int] = input[s]
lowerCamelCase__ : int = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(UpperCamelCase )
def _A (UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ) ->Tuple:
'''simple docstring'''
import torch
lowerCamelCase__ : List[Any] = torch.arange(1 , UpperCamelCase )
lowerCamelCase__ : Any = torch.remainder(UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : Optional[int] = remainders == 0
lowerCamelCase__ : List[str] = candidates[divisor_indices]
lowerCamelCase__ : List[Any] = torch.max(UpperCamelCase )
return largest_divisor, torch.div(UpperCamelCase , UpperCamelCase , rounding_mode="""floor""" )
class __A ( A_ ):
@property
def _snake_case (self ):
lowerCamelCase__ : str = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="""inputs""" )
lowerCamelCase__ : Any = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCamelCase__ : int = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def _snake_case (self ):
return self._config.num_heads
def _snake_case (self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ):
lowerCamelCase__ : Union[str, Any] = super(__magic_name__ , self ).generate_dummy_inputs(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
# We need to order the input in the way they appears in the forward()
lowerCamelCase__ : List[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCamelCase__ ,lowerCamelCase__ : str = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCamelCase__ : Any = seqlen + 2
lowerCamelCase__ : List[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCamelCase__ : Dict = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(self.num_layers )
]
lowerCamelCase__ : Dict = common_inputs["""attention_mask"""]
if self.use_past:
lowerCamelCase__ : int = ordered_inputs["""attention_mask"""].dtype
lowerCamelCase__ : List[Any] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
return ordered_inputs
@property
def _snake_case (self ):
return 13
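A quick sketch of how `attention_types` expands into per-layer attention kinds, mirroring the static expansion helper above:

pattern = [[["global", "local"], 12]]  # the default above
layers = []
for kinds, repeat in pattern:
    for _ in range(repeat):
        layers.extend(kinds)
print(len(layers), layers[:4])  # 24 ['global', 'local', 'global', 'local']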
| 157 | 1 |
"""simple docstring"""
def A_ ( __lowercase , __lowercase ):
def get_matched_characters(__lowercase , __lowercase ) -> str:
UpperCamelCase_ : List[str] =[]
UpperCamelCase_ : Union[str, Any] =min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
UpperCamelCase_ : Dict =int(max(0 , i - limit ) )
UpperCamelCase_ : List[str] =int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(__lowercase )
UpperCamelCase_ : Union[str, Any] =F'''{_stra[0:_stra.index(__lowercase )]} {_stra[_stra.index(__lowercase ) + 1:]}'''
return "".join(__lowercase )
# matching characters
UpperCamelCase_ : Union[str, Any] =get_matched_characters(__lowercase , __lowercase )
UpperCamelCase_ : List[Any] =get_matched_characters(__lowercase , __lowercase )
UpperCamelCase_ : List[Any] =len(__lowercase )
# transposition
UpperCamelCase_ : List[str] =(
len([(ca, ca) for ca, ca in zip(__lowercase , __lowercase ) if ca != ca] ) // 2
)
if not match_count:
UpperCamelCase_ : Optional[Any] =0.0
else:
UpperCamelCase_ : Optional[int] =(
1
/ 3
* (
match_count / len(__lowercase )
+ match_count / len(__lowercase )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
UpperCamelCase_ : Optional[int] =0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
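A reference-value sketch for the textbook pair "martha"/"marhta" (6 matches, 1 transposition, 3-character common prefix). The renaming above garbles some reads, notably the prefix loop, so treat this as the intended behaviour rather than the snippet's literal output.

jaro = (6 / 6 + 6 / 6 + (6 - 1) / 6) / 3
print(round(jaro + 0.1 * 3 * (1 - jaro), 4))  # 0.9611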
| 700 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
__SCREAMING_SNAKE_CASE = {
'facebook/mbart-large-en-ro': 1_024,
'facebook/mbart-large-cc25': 1_024,
}
# fmt: off
__SCREAMING_SNAKE_CASE = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """
    A "fast" MBART tokenizer backed by the `tokenizers` library. Source documents
    are encoded as `<tokens> </s> <lang_code>`, targets as `<lang_code> <tokens> </s>`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source language setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 395 | 0 |
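A hedged usage sketch for the tokenizer above (needs `transformers`, `tokenizers`, and network access to download the checkpoint); the trailing tokens show the `</s> <lang_code>` suffix that `set_src_lang_special_tokens` installs:

from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tok("UN Chief Says There Is No Plan to Stop War", return_tensors="pt")
print(tok.convert_ids_to_tokens(batch["input_ids"][0])[-2:])  # ['</s>', 'en_XX']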
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, type, length):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, type, length, batch_size):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
| 391 |
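`get_duration` is imported from a local `utils` module that is not part of this sample; a plausible stand-in (an assumption, not the actual helper) is a decorator that returns the wall-clock seconds of a single call:

import functools
import time


def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        starttime = time.time()
        func(*args, **kwargs)
        return time.time() - starttime  # seconds elapsed, which benchmark_iterating records

    return wrapper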
"""simple docstring"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row numpy array into a column numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first ones
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the dataset on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    # Check if the dimension desired is less than the number of classes
    assert classes > dimensions
    # Check if features have been already loaded. Note that `.any()` must be
    # called: a bare `features.any` is a bound method and always truthy.
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
        assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 391 | 1 |
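A toy run of the PCA routine above; it expects features in rows and samples in columns, so a (3, 4) input projected onto 2 components yields a (2, 4) output:

import numpy as np

toy_features = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0], [0.5, 1.0, 1.5, 2.0]])
projected = principal_component_analysis(toy_features, dimensions=2)
print(projected.shape)  # (2, 4)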
"""Check whether an integer is a perfect cube."""


def perfect_cube(n: int) -> bool:
    # Round the float cube root before comparing: `n ** (1 / 3)` is inexact for
    # many perfect cubes (e.g. `64 ** (1 / 3)` is 3.9999999999999996).
    val = round(n ** (1 / 3))
    return val * val * val == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4)) | 714 |
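Why the rounding in `perfect_cube` matters: float cube roots are inexact for many perfect cubes.

print(64 ** (1 / 3))     # 3.9999999999999996, so a naive `val ** 3 == n` check would fail
print(perfect_cube(64))  # True with the rounded cube root above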
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main() | 641 | 0 |
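The core pattern the script above tests, reduced to a minimal sketch: `gather_for_metrics` collects per-process tensors and drops the duplicate samples added to pad the last batch, so metric counts match the dataset size. On a single process it is a passthrough.

import torch
from accelerate import Accelerator

accelerator = Accelerator()
logits = torch.randn(8, 2, device=accelerator.device)
labels = torch.randint(0, 2, (8,), device=accelerator.device)
logits, labels = accelerator.gather_for_metrics((logits, labels))
print(logits.shape, labels.shape)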
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 453 |
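Stripped of the dataclass machinery, the deprecation loop above simply maps a legacy negative flag to its positive counterpart:

kwargs = {"no_cuda": True}
deprecated_arg = "no_cuda"
positive_arg = deprecated_arg[3:]        # "cuda"
value = not kwargs.pop(deprecated_arg)   # no_cuda=True becomes cuda=False
print(positive_arg, value)               # cuda False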
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(UpperCAmelCase , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase ) , [5, 6, 2, 5, 7, 8] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , strip_accents=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = RoCBertBasicTokenizer(do_lower_case=UpperCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowerCamelCase_ = {}
for i, token in enumerate(UpperCAmelCase ):
lowerCamelCase_ = i
lowerCamelCase_ = RoCBertWordpieceTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
lowerCamelCase_ = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(UpperCAmelCase ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def UpperCAmelCase__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
lowerCamelCase_ = tokenizer_r.encode_plus(
UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , add_special_tokens=UpperCAmelCase , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase , '''do_lower_case''' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = ['''的''', '''人''', '''有''']
lowerCamelCase_ = ''''''.join(UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase )
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@slow
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCamelCase_ = tokenizer.encode('''你好''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode('''你是谁''' , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def UpperCAmelCase__ ( self ):
lowerCamelCase_ = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
lowerCamelCase_ = '''你好,你是谁'''
lowerCamelCase_ = tokenizer.tokenize(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_shape_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.convert_tokens_to_pronunciation_ids(UpperCAmelCase )
lowerCamelCase_ = tokenizer.prepare_for_model(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , add_special_tokens=UpperCAmelCase )
lowerCamelCase_ = tokenizer.encode_plus(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
| 29 | 0 |
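The offset-mapping test above depends on the fast (Rust-backed) tokenizer reporting character spans; a minimal illustration with any fast BERT-style tokenizer (network access to the checkpoint assumed):

from transformers import BertTokenizerFast

tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
enc = tok("A naïve sentence.", return_offsets_mapping=True)
print(list(zip(enc.tokens(), enc["offset_mapping"])))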
from __future__ import annotations


def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 712 |
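An in-code use of the restored `bellman_ford` that avoids the interactive prompts:

example_graph = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
]
print(bellman_ford(example_graph, vertex_count=3, edge_count=3, src=0))  # [0.0, 3, 1]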
from ..utils import DummyObject, requires_backends


class TFGPT2Tokenizer(metaclass=DummyObject):
    # NOTE: the concrete class name is an educated reconstruction; the sample
    # only preserves the `keras_nlp` backend list.
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 129 | 0 |
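What the dummy object buys you: the import always succeeds, but instantiating the class (or touching any attribute) raises an informative ImportError via `requires_backends` when `keras_nlp` is not installed. Roughly:

try:
    tokenizer = TFGPT2Tokenizer()  # fails unless the keras_nlp backend is available
except ImportError as err:
    print(err)  # explains that keras_nlp must be installed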
"""simple docstring"""
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of valid successor nodes reachable from `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retraces the parent pointers from `node` back to the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            # Each frontier chases the other's most recent node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 299 |
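Why the bidirectional variant above can pay off: with branching factor b and solution depth d, a one-sided BFS touches on the order of b**d nodes, while two half-depth frontiers touch roughly 2 * b**(d/2):

b, d = 3, 8
print(b**d, 2 * b ** (d // 2))  # 6561 versus 162 nodes, roughly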
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
def _lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase__ : Any = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowerCAmelCase__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowerCAmelCase__ : List[Any] = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
lowerCAmelCase__ : Any = os.path.join(self.tmpdirname , UpperCamelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase , UpperCamelCase )
def _lowerCAmelCase ( self : List[Any] , **UpperCamelCase : Optional[Any] ) -> Any:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def _lowerCAmelCase ( self : str , **UpperCamelCase : Any ) -> Dict:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase )
def _lowerCAmelCase ( self : int , **UpperCamelCase : Tuple ) -> Dict:
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase )
def _lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : List[str] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
lowerCAmelCase__ : int = [Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
lowerCAmelCase__ : str = self.get_tokenizer()
lowerCAmelCase__ : Dict = self.get_rust_tokenizer()
lowerCAmelCase__ : Any = self.get_image_processor()
lowerCAmelCase__ : Dict = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase__ : Tuple = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase )
lowerCAmelCase__ : Optional[int] = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase__ : Tuple = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase )
def _lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
lowerCAmelCase__ : int = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase__ : Union[str, Any] = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
lowerCAmelCase__ : int = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def _lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Tuple = self.get_image_processor()
lowerCAmelCase__ : Tuple = self.get_tokenizer()
lowerCAmelCase__ : Optional[int] = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = self.prepare_image_inputs()
lowerCAmelCase__ : Any = image_processor(UpperCamelCase , return_tensors="""np""" )
lowerCAmelCase__ : Optional[Any] = processor(images=UpperCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Dict = self.get_image_processor()
lowerCAmelCase__ : Any = self.get_tokenizer()
lowerCAmelCase__ : List[Any] = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
lowerCAmelCase__ : Tuple = """lower newer"""
lowerCAmelCase__ : Union[str, Any] = processor(text=UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = tokenizer(UpperCamelCase , padding="""max_length""" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.get_image_processor()
lowerCAmelCase__ : Optional[int] = self.get_tokenizer()
lowerCAmelCase__ : Union[str, Any] = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = """lower newer"""
lowerCAmelCase__ : Tuple = self.prepare_image_inputs()
lowerCAmelCase__ : Tuple = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def _lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Dict = self.get_image_processor()
lowerCAmelCase__ : List[str] = self.get_tokenizer()
lowerCAmelCase__ : Union[str, Any] = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
lowerCAmelCase__ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ : str = processor.batch_decode(UpperCamelCase )
lowerCAmelCase__ : List[str] = tokenizer.batch_decode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : Any = self.get_image_processor()
lowerCAmelCase__ : str = self.get_tokenizer()
lowerCAmelCase__ : int = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = """lower newer"""
lowerCAmelCase__ : Dict = self.prepare_image_inputs()
lowerCAmelCase__ : Optional[int] = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 299 | 1 |
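A hedged end-to-end sketch of the processor the tests above exercise, assuming the public `kakaobrain/align-base` checkpoint and network access:

import numpy as np
from PIL import Image
from transformers import AlignProcessor

processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text="a photo of a cat", images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids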
"""Compute a^b recursively with divide and conquer."""


def actual_power(a: int, b: int):
    # NOTE: both recursive calls recompute the same half; caching the half in a
    # local variable would bring the multiplication count down to O(log b).
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """
    >>> power(4, 6)
    4096
    >>> power(2, 3)
    8
    >>> power(-2, 3)
    -8
    >>> power(2, -1)
    0.5
    """
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
| 721 |
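A quick check of the restored `power` against the built-in operator, including a negative base with a negative exponent:

for base, exp in [(2, 10), (3, 5), (-2, -3)]:
    assert power(base, exp) == base**exp
print(power(2, 10))  # 1024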
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    """Configuration class for BART models."""

    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_265,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
class a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@property
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
snake_case__ : Optional[int] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
snake_case__ : List[str] = {0: '''batch'''}
snake_case__ : int = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
snake_case__ : List[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
snake_case__ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(snake_case_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
snake_case__ : int = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
snake_case__ , snake_case__ : Tuple = self.num_layers
for i in range(snake_case_ ):
snake_case__ : List[str] = {0: '''batch''', 2: '''past_sequence + sequence'''}
snake_case__ : Tuple = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
snake_case__ : List[str] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
snake_case__ : str = super().outputs
else:
snake_case__ : List[Any] = super(snake_case_ , self ).outputs
if self.use_past:
snake_case__ , snake_case__ : Dict = self.num_layers
for i in range(snake_case_ ):
snake_case__ : Any = {0: '''batch''', 2: '''past_sequence + sequence'''}
snake_case__ : Optional[int] = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
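# --- illustrative sketch (added for exposition, separate from the config class
# above): the {0: "batch", 1: "sequence"}-style axis maps it returns are what
# ultimately become `dynamic_axes` for torch.onnx.export. Toy model and output
# file name are assumptions, not anything this file defines.
#
#     import torch
#
#     class TinyEncoder(torch.nn.Module):
#         def __init__(self):
#             super().__init__()
#             self.embed = torch.nn.Embedding(100, 16)
#
#         def forward(self, input_ids):
#             return self.embed(input_ids).mean(dim=1)
#
#     torch.onnx.export(
#         TinyEncoder().eval(),
#         (torch.randint(0, 100, (2, 8)),),
#         "tiny_encoder.onnx",
#         input_names=["input_ids"],
#         output_names=["pooled"],
#         dynamic_axes={"input_ids": {0: "batch", 1: "sequence"}, "pooled": {0: "batch"}},
#     )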
| 502 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own arguments plus everything meant for the training script."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
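# --- illustrative sketch (added for exposition, not part of the launcher) ---
# The launcher assumes the training script exposes `_mp_fn(index)`, which
# xmp.spawn calls once per TPU core. A minimal compatible script could look
# like this (names beyond `_mp_fn` are assumptions):
#
#     import torch_xla.core.xla_model as xm
#
#     def _mp_fn(index):
#         device = xm.xla_device()
#         print(f"process {index} running on {device}")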
| 19 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)
    def get_default_steps(self) -> int:
        """Return the default number of inference steps: 50 for DDIM, 1000 for DDPM."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )
        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the denoising process: recover the noisy latents behind generated images."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two noise tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
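# --- illustrative usage sketch (added for exposition; the checkpoint id is an
# assumption, not something this file pins down) ---
#
#     import torch
#     from diffusers import DiffusionPipeline
#
#     pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
#     out = pipe(batch_size=1, generator=torch.Generator().manual_seed(42))
#     mel_image = out.images[0]   # mel spectrogram as a PIL image
#     waveform = out.audios[0]    # corresponding audio as a numpy array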
| 19 | 1 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 182 |
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.array:
    """Solve an ODE y' = f(x, y) with the modified Euler (Heun) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Euler predictor step
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Trapezoidal corrector step
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
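# Sanity check (added for exposition): on y' = y with y(0) = 1 the exact
# solution is e^x, so the last entry below should sit close to e ~= 2.71828;
# the method is second order, so halving step_size shrinks the error ~4x.
def _exponential_growth(x: float, y: float) -> float:
    return y


# euler_modified(_exponential_growth, 1.0, 0.0, 0.01, 1.0)[-1]  ->  ~2.71828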
if __name__ == "__main__":
import doctest
doctest.testmod() | 182 | 1 |
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def a ( lowerCamelCase__=32 , lowerCamelCase__=10 , lowerCamelCase__=1_00 , lowerCamelCase__=10_26 , lowerCamelCase__=True , lowerCamelCase__="data/tokenized_stories_train_wikitext103.jbl" , lowerCamelCase__="igf_context_pairs.jbl" , ):
'''simple docstring'''
set_seed(3 )
# generate train_data and objective_set
A_, A_ : Any = generate_datasets(
lowerCamelCase__ , lowerCamelCase__ , number=lowerCamelCase__ , min_len=10_26 , trim=lowerCamelCase__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
A_ : Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# load pretrained model
A_ : Any = load_gpta("""gpt2""" ).to(lowerCamelCase__ )
print("""computing perplexity on objective set""" )
A_ : Tuple = compute_perplexity(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).item()
print("""perplexity on objective set:""" , lowerCamelCase__ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def a ( lowerCamelCase__ , lowerCamelCase__=15 , lowerCamelCase__=1_28 , lowerCamelCase__=1_00 , lowerCamelCase__="igf_model.pt" , ):
'''simple docstring'''
set_seed(42 )
# Load pre-trained model
A_ : Union[str, Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" )
# Initialize secondary learner to use embedding weights of model
A_ : Tuple = SecondaryLearner(lowerCamelCase__ )
# Train secondary learner
A_ : int = train_secondary_learner(
lowerCamelCase__ , lowerCamelCase__ , max_epochs=lowerCamelCase__ , batch_size=lowerCamelCase__ , eval_freq=1_00 , igf_model_path=lowerCamelCase__ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=32 , lowerCamelCase__=10_00 , lowerCamelCase__=16 , lowerCamelCase__=1.0 , lowerCamelCase__=recopy_gpta , lowerCamelCase__=None , lowerCamelCase__=10 , lowerCamelCase__="gpt2_finetuned.pt" , ):
'''simple docstring'''
A_ : Tuple = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
A_ : List[Any] = RandomSampler(lowerCamelCase__ )
A_ : Dict = DataLoader(lowerCamelCase__ , sampler=lowerCamelCase__ )
A_ : Union[str, Any] = max_steps // (len(lowerCamelCase__ )) + 1
A_ : Optional[Any] = 0
A_ : str = torch.zeros((1, context_len) , dtype=torch.long , device=lowerCamelCase__ )
A_, A_, A_ : Dict = recopy_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(lowerCamelCase__ )
secondary_learner.eval()
A_ : Any = []
A_ : List[str] = 0
A_ : Any = []
A_ : Any = []
# Compute the performance of the transformer model at the beginning
A_ : Dict = compute_perplexity(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
test_perps.append(lowerCamelCase__ )
print("""Test perplexity, step""" , lowerCamelCase__ , """:""" , lowerCamelCase__ )
for epoch in range(int(lowerCamelCase__ ) ):
for step, example in enumerate(lowerCamelCase__ ):
torch.cuda.empty_cache()
A_ : Dict = random.randint(0 , example.size(2 ) - context_len - 1 )
A_ : Tuple = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
A_ : Union[str, Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
A_ : Any = True
if secondary_learner is not None:
A_ : Dict = secondary_learner.forward(
torch.tensor(lowerCamelCase__ , dtype=torch.long , device=lowerCamelCase__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(lowerCamelCase__ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
A_ : Optional[Any] = -1
if predicted_q < threshold:
A_ : Optional[int] = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
A_ : List[Any] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
A_ : Dict = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
A_ : List[str] = compute_perplexity(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
test_perps.append(lowerCamelCase__ )
print("""Test perplexity, step""" , lowerCamelCase__ , """:""" , lowerCamelCase__ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , lowerCamelCase__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
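# --- generic sketch (added for exposition; this is NOT the igf helper, whose
# exact signature lives in igf.igf) of the perplexity quantity used above:
# the exponentiated mean negative log-likelihood of a causal LM.
#
#     import torch
#
#     @torch.no_grad()
#     def perplexity(model, input_ids: torch.Tensor) -> float:
#         loss = model(input_ids, labels=input_ids).loss
#         return float(torch.exp(loss))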
def a ( ):
'''simple docstring'''
A_ : Optional[int] = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
# Required parameters
parser.add_argument(
"""--data_dir""" , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""The input data dir. Should contain data files for WikiText.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--data_file""" , type=lowerCamelCase__ , default=lowerCamelCase__ , help=(
"""A jbl file containing tokenized data which can be split as objective dataset, """
"""train_dataset and test_dataset."""
) , )
parser.add_argument(
"""--igf_data_file""" , type=lowerCamelCase__ , default=lowerCamelCase__ , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , )
parser.add_argument(
"""--output_dir""" , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""The output directory where the final fine-tuned model is stored.""" , )
parser.add_argument(
"""--tokenizer_name""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument("""--seed""" , type=lowerCamelCase__ , default=lowerCamelCase__ , help="""A seed for reproducible training.""" )
parser.add_argument(
"""--context_len""" , default=32 , type=lowerCamelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--size_objective_set""" , default=1_00 , type=lowerCamelCase__ , help="""number of articles that are long enough to be used as our objective set""" , )
parser.add_argument(
"""--eval_freq""" , default=1_00 , type=lowerCamelCase__ , help="""secondary model evaluation is triggered at eval_freq""" )
parser.add_argument("""--max_steps""" , default=10_00 , type=lowerCamelCase__ , help="""To calculate training epochs""" )
parser.add_argument(
"""--secondary_learner_batch_size""" , default=1_28 , type=lowerCamelCase__ , help="""batch size of training data for secondary learner""" , )
parser.add_argument(
"""--batch_size""" , default=16 , type=lowerCamelCase__ , help="""batch size of training data of language model(gpt2) """ )
parser.add_argument(
"""--eval_interval""" , default=10 , type=lowerCamelCase__ , help=(
"""decay the selectivity of our secondary learner filter from"""
"""1 standard deviation above average to 1 below average after 10 batches"""
) , )
parser.add_argument(
"""--number""" , default=1_00 , type=lowerCamelCase__ , help="""The number of examples split to be used as objective_set/test_data""" )
parser.add_argument(
"""--min_len""" , default=10_26 , type=lowerCamelCase__ , help="""The minimum length of the article to be used as objective set""" )
parser.add_argument(
"""--secondary_learner_max_epochs""" , default=15 , type=lowerCamelCase__ , help="""number of epochs to train secondary learner""" )
parser.add_argument("""--trim""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""" , default=1.0 , type=lowerCamelCase__ , help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
) , )
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=lowerCamelCase__ , help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=lowerCamelCase__ , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , )
# Load train data for secondary learner
A_ : int = joblib.load("""data/IGF_values.jbl""" )
# Train secondary learner
A_ : Tuple = training_secondary_learner(
lowerCamelCase__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path="""igf_model.pt""" , )
# load pretrained gpt2 model
A_ : Tuple = GPTaLMHeadModel.from_pretrained("""gpt2""" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
A_, A_ : int = generate_datasets(
context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=1_00 , min_len=10_26 , trim=lowerCamelCase__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=lowerCamelCase__ , secondary_learner=lowerCamelCase__ , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , )
if __name__ == "__main__":
main() | 667 |
'''simple docstring'''
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide a byte range into `partitions` contiguous chunks; the final
    chunk absorbs any remainder."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
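# Example (added for exposition): splitting 100 bytes across 3 partitions;
# the final partition absorbs the remainder.
#
#     allocation_num(100, 3)  ->  ['1-33', '34-66', '67-100']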
if __name__ == "__main__":
import doctest
doctest.testmod() | 667 | 1 |
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def lowerCAmelCase__ ( a_ : List[Any] , a_ : Optional[int] = "student" , a_ : str = None , a_ : Tuple = None , a_ : Tuple=False , a_ : List[Any]=None , a_ : Union[str, Any]=None , **a_ : Optional[Any] , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
UpperCAmelCase__ : Dict = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
AutoTokenizer.from_pretrained(lowerCamelCase__ ).save_pretrained(lowerCamelCase__ ) # purely for convenience
UpperCAmelCase__ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase__ ).eval()
else:
assert isinstance(lowerCamelCase__ , lowerCamelCase__ ), f"""teacher must be a model or string got type {type(lowerCamelCase__ )}"""
UpperCAmelCase__ : str = teacher.config.to_diff_dict()
try:
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCAmelCase__ : Tuple = teacher_e
if d is None:
UpperCAmelCase__ : Dict = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
UpperCAmelCase__ , UpperCAmelCase__ : Dict = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCAmelCase__ , UpperCAmelCase__ : str = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCAmelCase__ : Dict = teacher_e
if d is None:
UpperCAmelCase__ : Dict = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(lowerCamelCase__ )
# Copy weights
UpperCAmelCase__ : Tuple = teacher.config_class(**lowerCamelCase__ )
UpperCAmelCase__ : Tuple = AutoModelForSeqaSeqLM.from_config(lowerCamelCase__ )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
UpperCAmelCase__ : str = student.load_state_dict(teacher.state_dict() , strict=lowerCamelCase__ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = list(range(lowerCamelCase__ ) ), list(range(lowerCamelCase__ ) )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
f""" {save_path}""" )
student.save_pretrained(lowerCamelCase__ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCAmelCase__ : List[Any] = pick_layers_to_copy(lowerCamelCase__ , lowerCamelCase__ )
if d_layers_to_copy is None:
UpperCAmelCase__ : List[str] = pick_layers_to_copy(lowerCamelCase__ , lowerCamelCase__ )
try:
if hasattr(
lowerCamelCase__ , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , lowerCamelCase__ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , lowerCamelCase__ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , lowerCamelCase__ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , lowerCamelCase__ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , lowerCamelCase__ )
copy_layers(teacher.decoder.block , student.decoder.block , lowerCamelCase__ )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
UpperCAmelCase__ : int = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(lowerCamelCase__ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers) | 721 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 599 | 0 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
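# Quick inspection (added for exposition) of the cosine schedule above: betas
# start tiny and grow toward max_beta as t approaches num_diffusion_timesteps.
#
#     betas = betas_for_alpha_bar(10)
#     print([round(float(b), 4) for b in betas])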
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """Modified DDPM scheduler as used in the unCLIP paper."""

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ):
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )
            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )
            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples | 522 |
"""simple docstring"""
def combination_util(arr, n, r, index, data, i):
    """Recursively fill `data` and print every combination of size r."""
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store one combination at a time
    data = [0] * r
    # Print all combinations using the temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
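# Cross-check (added for exposition): the standard library yields the same
# combinations in the same order, which is handy for testing the recursion.
from itertools import combinations


def print_combination_itertools(arr, r):
    for combo in combinations(arr, r):
        print(*combo)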
if __name__ == "__main__":
# Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu | 522 | 1 |
"""simple docstring"""
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    """Evaluate the interpolating polynomial at x0 with Neville's method.
    Returns the interpolated value plus the full table of intermediate values."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
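# Worked example (added for exposition): four samples of f(x) = x^2; a cubic
# through them reproduces the parabola exactly, so the value at 2.5 is 6.25.
#
#     neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 2.5)[0]  ->  6.25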
if __name__ == "__main__":
import doctest
doctest.testmod()
| 378 |
"""simple docstring"""
import argparse
import torch
from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
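# Hedged invocation sketch (added for exposition; the file and path names are
# placeholders, not pinned down by this script):
#
#     python convert_s3prl_checkpoint.py \
#         --base_model_name microsoft/unispeech-sat-base \
#         --config_path ./config.json \
#         --checkpoint_path ./downstream.ckpt \
#         --model_dump_path ./converted_model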
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 378 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCAmelCase_ ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
def __snake_case ( self : Any ):
lowerCAmelCase__ = 1
lowerCAmelCase__ = 3
lowerCAmelCase__ = (32, 32)
lowerCAmelCase__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ )
return image
@property
def __snake_case ( self : List[Any] ):
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def __snake_case ( self : List[Any] ):
torch.manual_seed(0 )
lowerCAmelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def __snake_case ( self : Optional[int] ):
torch.manual_seed(0 )
lowerCAmelCase__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(SCREAMING_SNAKE_CASE_ )
@property
def __snake_case ( self : int ):
def extract(*SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : Tuple ):
class lowerCAmelCase_ :
def __init__( self : int ):
lowerCAmelCase__ = torch.ones([0] )
def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str ):
self.pixel_values.to(SCREAMING_SNAKE_CASE_ )
return self
return Out()
return extract
def __snake_case ( self : Any ):
lowerCAmelCase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.dummy_cond_unet
lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = self.dummy_vae
lowerCAmelCase__ = self.dummy_text_encoder
lowerCAmelCase__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCAmelCase__ = 77
lowerCAmelCase__ = self.dummy_image.to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ = AltDiffusionImgaImgPipeline(
unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = alt_pipe.to(SCREAMING_SNAKE_CASE_ )
alt_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
lowerCAmelCase__ = alt_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
lowerCAmelCase__ = alt_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
lowerCAmelCase__ = image[0, -3:, -3:, -1]
lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __snake_case ( self : int ):
lowerCAmelCase__ = self.dummy_cond_unet
lowerCAmelCase__ = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = self.dummy_vae
lowerCAmelCase__ = self.dummy_text_encoder
lowerCAmelCase__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCAmelCase__ = 77
lowerCAmelCase__ = self.dummy_image.to(SCREAMING_SNAKE_CASE_ )
# put models in fp16
lowerCAmelCase__ = unet.half()
lowerCAmelCase__ = vae.half()
lowerCAmelCase__ = bert.half()
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ = AltDiffusionImgaImgPipeline(
unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=self.dummy_extractor , )
lowerCAmelCase__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = alt_pipe.to(SCREAMING_SNAKE_CASE_ )
alt_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = '''A painting of a squirrel eating a burger'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = alt_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=2 , output_type='''np''' , image=SCREAMING_SNAKE_CASE_ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __snake_case ( self : Optional[int] ):
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCAmelCase__ = init_image.resize((760, 504) )
lowerCAmelCase__ = '''BAAI/AltDiffusion'''
lowerCAmelCase__ = AltDiffusionImgaImgPipeline.from_pretrained(
SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
lowerCAmelCase__ = '''A fantasy landscape, trending on artstation'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe(
prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , strength=0.75 , guidance_scale=7.5 , generator=SCREAMING_SNAKE_CASE_ , output_type='''np''' , )
lowerCAmelCase__ = output.images[0]
lowerCAmelCase__ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowerCAmelCase__ = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def __snake_case ( self : Tuple ):
lowerCAmelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCAmelCase__ = init_image.resize((768, 512) )
lowerCAmelCase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
lowerCAmelCase__ = '''BAAI/AltDiffusion'''
lowerCAmelCase__ = AltDiffusionImgaImgPipeline.from_pretrained(
SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
lowerCAmelCase__ = '''A fantasy landscape, trending on artstation'''
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe(
prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , strength=0.75 , guidance_scale=7.5 , generator=SCREAMING_SNAKE_CASE_ , output_type='''np''' , )
lowerCAmelCase__ = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 668 |
def odd_even_transposition(arr: list) -> list:
    """Brick sort: alternately compare (even, odd) and (odd, even) neighbour
    pairs until the list is sorted; at most len(arr) phases are needed."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
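# Example (added for exposition): every phase compares disjoint neighbour
# pairs, so all comparisons within a phase can run in parallel.
#
#     odd_even_transposition([3, 1, 2])  ->  [1, 2, 3]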
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 668 | 1 |
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """Return the intensity of light transmitted through a polarizer at the
    given angle in degrees, per Malus's law: I = I0 * cos^2(theta)."""
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
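# Worked example (added for exposition): at 60 degrees, cos^2 transmits a
# quarter of the light, so malus_law(100, 60) is approximately 25.0.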
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
| 348 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator, dataset_size, batch_size, iterable=False):
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))
    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)
    return dl


def verify_dataloader_batch_sizes(
    accelerator,
    dataset_size,
    batch_size,
    process_0_expected_batch_sizes,
    process_1_expected_batch_sizes,
):
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)
    batch_sizes = [len(batch[0]) for batch in dl]
    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()
    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )
    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches
    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError
    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass
        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()
    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()
    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()
    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()
    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()
    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()
    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()
    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state


if __name__ == "__main__":
    main()
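# Minimal illustration (my addition) of what `even_batches` padding means for
# the dataset_size=3, batch_size=1, num_processes=2 case exercised above:
# samples are dealt round-robin across ranks, and with even_batches=True the
# short rank is padded by wrapping around to the start of the dataset so that
# every process sees the same number of batches.
samples = list(range(3))
shards = {0: samples[0::2], 1: samples[1::2]}  # rank 0 -> [0, 2], rank 1 -> [1]
shards[1].append(samples[0])                   # padded duplicate for the short rank
assert [len(batches) for batches in shards.values()] == [2, 2]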
| 348 | 1 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self) -> None:
        """test for method component()"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        """test for method __str__()"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        """test for method __len__()"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        """test for method euclidean_length()"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        """test for method __add__()"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        """test for method __sub__()"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        """test for method __mul__()"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual(a * b, 0)

    def test_zero_vector(self) -> None:
        """test for global function zero_vector()"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        """test for global function unit_basis_vector()"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        """test for global function axpy() (operation)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        """test for method copy()"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self) -> None:
        """test for method change_component()"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        """test for Matrix method __str__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        """test for Matrix method minor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        """test for Matrix method cofactor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        """test for Matrix method determinant()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self) -> None:
        """test for Matrix method __mul__()"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        """test for Matrix method change_component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        """test for Matrix method component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertAlmostEqual(7, a.component(2, 1), delta=0.01)

    def test_add_matrix(self) -> None:
        """test for Matrix method __add__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self) -> None:
        """test for Matrix method __sub__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        """test for global function square_zero_matrix()"""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )


if __name__ == "__main__":
    unittest.main()
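# Quick reference (my addition): axpy is the classic BLAS-style operation
# a * x + y for a scalar a and vectors x and y, which test_axpy above checks
# via str(axpy(2, Vector([1, 2, 3]), Vector([1, 0, 1]))) == "(3,4,7)". The same
# idea with plain lists, independent of the Vector class under test:
def axpy_lists(a: float, x: list, y: list) -> list:
    assert len(x) == len(y)
    return [a * xi + yi for xi, yi in zip(x, y)]


assert axpy_lists(2, [1, 2, 3], [1, 0, 1]) == [3, 4, 7]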
| 470 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """Return the resolve-URL of a file inside a dataset repo on the Hugging Face Hub."""
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
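# Example usage (my addition; the repo id and file name are illustrative, not
# from the original code). Building the resolve-URL is a pure string operation,
# so no network access is needed:
if __name__ == "__main__":
    print(hf_hub_url("squad", "dataset_infos.json", revision="main"))
    # prints roughly: https://huggingface.co/datasets/squad/resolve/main/dataset_infos.json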
| 470 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
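# Note (my addition): the module above follows Transformers' lazy-import
# convention: `_import_structure` maps submodule names to their public symbols,
# and the package module is swapped for a `_LazyModule` that only imports a
# submodule when one of its attributes is first accessed. A minimal sketch of
# the same idea using only the standard library (kept as a comment so it does
# not pollute the real module namespace):
#
#   import importlib
#   import types
#
#   class MiniLazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._attr_to_module = {
#               attr: mod for mod, attrs in import_structure.items() for attr in attrs
#           }
#
#       def __getattr__(self, attr):
#           module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#           return getattr(module, attr)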
| 420 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_graphormer"] = [
    "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
    "GraphormerForGraphClassification",
    "GraphormerModel",
    "GraphormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 420 | 1 |
"""simple docstring"""
from statistics import mean, stdev
def _lowerCamelCase ( lowerCamelCase__ : list , lowerCamelCase__ : int = 3 ):
lowercase__ : List[Any] = min(lowerCamelCase__ )
lowercase__ : str = max(lowerCamelCase__ )
# normalize data
return [round((x - x_min) / (x_max - x_min) , lowerCamelCase__ ) for x in data]
def _lowerCamelCase ( lowerCamelCase__ : list , lowerCamelCase__ : int = 3 ):
lowercase__ : Optional[int] = mean(lowerCamelCase__ )
lowercase__ : str = stdev(lowerCamelCase__ )
# standardize data
return [round((x - mu) / (sigma) , lowerCamelCase__ ) for x in data] | 200 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__( self ) -> Tuple:
lowercase__ : str = tempfile.mkdtemp()
lowercase__ : List[Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowercase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowercase__ : Optional[Any] = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
lowercase__ : Any = os.path.join(self.tmpdirname , lowerCamelCase__ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__( self , **lowerCamelCase__ ) -> str:
return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def UpperCAmelCase__( self , **lowerCamelCase__ ) -> str:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def UpperCAmelCase__( self , **lowerCamelCase__ ) -> List[Any]:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def UpperCAmelCase__( self ) -> Any:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__( self ) -> Optional[int]:
lowercase__ : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase__ : Any = [Image.fromarray(np.moveaxis(lowerCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__( self ) -> int:
lowercase__ : Union[str, Any] = self.get_tokenizer()
lowercase__ : Union[str, Any] = self.get_rust_tokenizer()
lowercase__ : Any = self.get_image_processor()
lowercase__ : Union[str, Any] = AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ : Tuple = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase__ )
lowercase__ : List[Any] = AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ : str = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase__ )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> int:
lowercase__ : Optional[int] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase__ : Any = self.get_image_processor(do_normalize=lowerCamelCase__ , padding_value=1.0 )
lowercase__ : Union[str, Any] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> Tuple:
lowercase__ : Optional[int] = self.get_image_processor()
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : Tuple = AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
lowercase__ : Optional[int] = self.prepare_image_inputs()
lowercase__ : Tuple = image_processor(lowerCamelCase__ , return_tensors="""np""" )
lowercase__ : str = processor(images=lowerCamelCase__ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__( self ) -> List[str]:
lowercase__ : Union[str, Any] = self.get_image_processor()
lowercase__ : Tuple = self.get_tokenizer()
lowercase__ : List[str] = AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
lowercase__ : Any = """lower newer"""
lowercase__ : List[Any] = processor(text=lowerCamelCase__ )
lowercase__ : Tuple = tokenizer(lowerCamelCase__ , padding="""max_length""" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__( self ) -> str:
lowercase__ : List[Any] = self.get_image_processor()
lowercase__ : Any = self.get_tokenizer()
lowercase__ : Union[str, Any] = AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
lowercase__ : List[Any] = """lower newer"""
lowercase__ : List[str] = self.prepare_image_inputs()
lowercase__ : List[Any] = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def UpperCAmelCase__( self ) -> Union[str, Any]:
lowercase__ : int = self.get_image_processor()
lowercase__ : Union[str, Any] = self.get_tokenizer()
lowercase__ : Tuple = AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
lowercase__ : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : Tuple = processor.batch_decode(lowerCamelCase__ )
lowercase__ : Optional[Any] = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> List[Any]:
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Optional[int] = AlignProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
lowercase__ : str = """lower newer"""
lowercase__ : List[Any] = self.prepare_image_inputs()
lowercase__ : int = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 200 | 1 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
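# Example usage (my addition; illustrative, assuming the diffusers library):
# every concrete scheduler inherits from SchedulerMixin, so a pipeline can list
# and swap schedulers that share a config:
#
#   scheduler = DDPMScheduler.from_pretrained("some/repo", subfolder="scheduler")
#   print(scheduler.compatibles)  # scheduler classes usable with this config
#   pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)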
| 423 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = TextToVideoSDPipeline
UpperCAmelCase_ : str = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase_ : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
UpperCAmelCase_ : Dict = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def snake_case ( self ) -> int:
torch.manual_seed(0 )
A : Any = UNet3DConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
A : int = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , )
torch.manual_seed(0 )
A : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
A : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
A : int = CLIPTextModel(__UpperCAmelCase )
A : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
A : Optional[Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> List[Any]:
if str(__UpperCAmelCase ).startswith('''mps''' ):
A : List[str] = torch.manual_seed(__UpperCAmelCase )
else:
A : Optional[Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
A : Optional[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def snake_case ( self ) -> List[str]:
A : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
A : Dict = self.get_dummy_components()
A : Any = TextToVideoSDPipeline(**__UpperCAmelCase )
A : Union[str, Any] = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A : Optional[int] = self.get_dummy_inputs(__UpperCAmelCase )
A : Optional[int] = '''np'''
A : Dict = sd_pipe(**__UpperCAmelCase ).frames
A : Tuple = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
A : Any = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case ( self ) -> Dict:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__UpperCAmelCase , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def snake_case ( self ) -> str:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__UpperCAmelCase , expected_max_diff=1E-2 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def snake_case ( self ) -> Any:
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def snake_case ( self ) -> Tuple:
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def snake_case ( self ) -> List[str]:
pass
def snake_case ( self ) -> Dict:
return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
"""simple docstring"""
def snake_case ( self ) -> int:
A : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
A : List[Any] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
A : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A : Optional[int] = pipe.to('''cuda''' )
A : List[Any] = '''Spiderman is surfing'''
A : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
A : List[Any] = pipe(__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=25 , output_type='''pt''' ).frames
A : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def snake_case ( self ) -> Union[str, Any]:
A : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
A : Tuple = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
A : Any = pipe.to('''cuda''' )
A : int = '''Spiderman is surfing'''
A : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 )
A : List[Any] = pipe(__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type='''pt''' ).frames
A : List[str] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 423 | 1 |
from torch import nn


class ClassificationHead(nn.Module):
    """Classification head: a single linear layer mapping embeddings to class logits."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = nn.Linear(embed_size, class_size)
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
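# Example usage (my addition): score a batch of pooled embeddings with the head.
if __name__ == "__main__":
    import torch

    head = ClassificationHead(class_size=5, embed_size=768)
    hidden_state = torch.randn(2, 768)  # batch of 2 embeddings
    print(head(hidden_state).shape)     # torch.Size([2, 5])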
| 291 |
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
TGT = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_ra = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    tgt = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
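# Note (my addition): `calculate_rouge` here is the seq2seq examples' local
# helper built on the `rouge_score` package. With the default bootstrap
# aggregation it returns a plain dict of aggregated F-measures, e.g. (numbers
# illustrative only): {"rouge1": 53.1, "rouge2": 32.4, "rougeL": 40.0}.
# With bootstrap_aggregation=False it returns a defaultdict mapping each ROUGE
# key to per-example score tuples, which is what the first test asserts.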
| 675 | 0 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
def __init__( self : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : int=9_9 , lowerCAmelCase_ : Dict=1_3 , lowerCAmelCase_ : int=1_6 , lowerCAmelCase_ : Optional[Any]=7 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : str=3_2 , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : Union[str, Any]=4 , lowerCAmelCase_ : List[Any]=3_0 , lowerCAmelCase_ : int=0 , lowerCAmelCase_ : List[str]=1 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : Tuple=None , ):
"""simple docstring"""
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = decoder_seq_length
# For common tests
lowercase_ = self.decoder_seq_length
lowercase_ = is_training
lowercase_ = use_attention_mask
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = d_model
lowercase_ = d_model
lowercase_ = decoder_layers
lowercase_ = decoder_layers
lowercase_ = decoder_ffn_dim
lowercase_ = decoder_attention_heads
lowercase_ = decoder_attention_heads
lowercase_ = eos_token_id
lowercase_ = bos_token_id
lowercase_ = pad_token_id
lowercase_ = decoder_start_token_id
lowercase_ = use_cache
lowercase_ = max_position_embeddings
lowercase_ = None
lowercase_ = decoder_seq_length
lowercase_ = 2
lowercase_ = 1
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size)
lowercase_ = None
if self.use_attention_mask:
lowercase_ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2)
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size)
lowercase_ = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
lowercase_ = True
lowercase_ = TrOCRDecoder(config=lowerCAmelCase_).to(lowerCAmelCase_).eval()
lowercase_ = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
lowercase_ = model(lowerCAmelCase_ , use_cache=lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_)
lowercase_ = model(lowerCAmelCase_ , use_cache=lowerCAmelCase_)
self.parent.assertTrue(len(lowerCAmelCase_) == len(lowerCAmelCase_))
self.parent.assertTrue(len(lowerCAmelCase_) == len(lowerCAmelCase_) + 1)
lowercase_ = outputs["""past_key_values"""]
# create hypothetical next token and extent to next_input_ids
lowercase_ = ids_tensor((2, 1) , config.vocab_size - 1) + 1
# append to next input_ids and
lowercase_ = torch.cat([input_ids, next_tokens] , dim=-1)
lowercase_ = model(lowerCAmelCase_)["""last_hidden_state"""]
lowercase_ = model(lowerCAmelCase_ , past_key_values=lowerCAmelCase_)["""last_hidden_state"""]
# select random slice
lowercase_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
lowercase_ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
lowercase_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowercase__ = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowercase__ = (TrOCRForCausalLM,) if is_torch_available() else ()
lowercase__ = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
lowercase__ = True
lowercase__ = False
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = TrOCRStandaloneDecoderModelTester(self , is_training=lowerCAmelCase_)
lowercase_ = ConfigTester(self , config_class=lowerCAmelCase_)
def _UpperCAmelCase ( self : List[str]):
"""simple docstring"""
pass
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
pass
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
pass
def _UpperCAmelCase ( self : int):
"""simple docstring"""
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*lowerCAmelCase_)
def _UpperCAmelCase ( self : Any):
"""simple docstring"""
return
@unittest.skip("""The model doesn't support left padding""") # and it's not used enough to be worth fixing :)
def _UpperCAmelCase ( self : str):
"""simple docstring"""
pass
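# Note (my addition): create_and_check_decoder_model_past above follows the
# standard recipe for validating a decoder's key/value cache: run the full
# sequence through the model once, then run it incrementally with
# `past_key_values`, and assert that the outputs for the final position agree
# within a small tolerance (torch.allclose with atol=1e-3).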
| 702 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Any = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 100 | 0 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
a_ = ProphetNetTokenizer
a_ = False
def lowercase ( self : Union[str, Any] ) -> List[Any]:
super().setUp()
__lowerCAmelCase = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def lowercase ( self : Tuple , lowerCAmelCase_ : str ) -> List[str]:
__lowerCAmelCase = 'UNwant\u00E9d,running'
__lowerCAmelCase = 'unwanted, running'
return input_text, output_text
def lowercase ( self : str ) -> Optional[Any]:
__lowerCAmelCase = self.tokenizer_class(self.vocab_file )
__lowerCAmelCase = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(lowerCAmelCase_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def lowercase ( self : Optional[Any] ) -> Any:
__lowerCAmelCase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def lowercase ( self : List[Any] ) -> List[Any]:
__lowerCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowercase ( self : Tuple ) -> Any:
__lowerCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def lowercase ( self : Tuple ) -> Dict:
__lowerCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowercase ( self : Union[str, Any] ) -> Dict:
__lowerCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def lowercase ( self : Optional[Any] ) -> int:
__lowerCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowercase ( self : Optional[int] ) -> List[Any]:
__lowerCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowercase ( self : Tuple ) -> List[Any]:
__lowerCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def lowercase ( self : str ) -> Optional[int]:
__lowerCAmelCase = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def lowercase ( self : List[str] ) -> Tuple:
__lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__lowerCAmelCase = {}
for i, token in enumerate(lowerCAmelCase_ ):
__lowerCAmelCase = i
__lowerCAmelCase = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
@require_torch
def lowercase ( self : str ) -> Any:
__lowerCAmelCase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowerCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowerCAmelCase = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
__lowerCAmelCase = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors='pt' )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
__lowerCAmelCase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def lowercase ( self : Dict ) -> int:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def lowercase ( self : Optional[int] ) -> Optional[Any]:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def lowercase ( self : int ) -> Any:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
def lowercase ( self : Tuple ) -> Optional[int]:
__lowerCAmelCase = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
__lowerCAmelCase = tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
| 53 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMv3ModelTester:
def __init__( self : Any , __A : Optional[int] , __A : Optional[int]=2 , __A : int=3 , __A : Union[str, Any]=4 , __A : Tuple=2 , __A : Union[str, Any]=7 , __A : Any=True , __A : List[str]=True , __A : Tuple=True , __A : Tuple=True , __A : List[str]=99 , __A : Tuple=36 , __A : Union[str, Any]=3 , __A : str=4 , __A : str=37 , __A : int="gelu" , __A : Union[str, Any]=0.1 , __A : str=0.1 , __A : List[Any]=512 , __A : Optional[int]=16 , __A : int=2 , __A : List[Any]=0.02 , __A : Optional[Any]=6 , __A : int=6 , __A : str=3 , __A : Optional[int]=4 , __A : Union[str, Any]=None , __A : Tuple=1000 , ) ->Any:
"""simple docstring"""
a__ :Any = parent
a__ :Optional[int] = batch_size
a__ :Union[str, Any] = num_channels
a__ :Any = image_size
a__ :Optional[Any] = patch_size
a__ :Optional[Any] = text_seq_length
a__ :int = is_training
a__ :Tuple = use_input_mask
a__ :Any = use_token_type_ids
a__ :int = use_labels
a__ :str = vocab_size
a__ :List[str] = hidden_size
a__ :Optional[int] = num_hidden_layers
a__ :List[str] = num_attention_heads
a__ :List[str] = intermediate_size
a__ :int = hidden_act
a__ :Optional[Any] = hidden_dropout_prob
a__ :Union[str, Any] = attention_probs_dropout_prob
a__ :int = max_position_embeddings
a__ :Tuple = type_vocab_size
a__ :Union[str, Any] = type_sequence_label_size
a__ :List[Any] = initializer_range
a__ :str = coordinate_size
a__ :Union[str, Any] = shape_size
a__ :int = num_labels
a__ :Optional[int] = num_choices
a__ :str = scope
a__ :int = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
a__ :str = text_seq_length
a__ :Tuple = (image_size // patch_size) ** 2 + 1
a__ :Optional[int] = self.text_seq_length + self.image_seq_length
def prepare_config_and_inputs(self):
"""simple docstring"""
a__ :str = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
a__ :Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
a__ :Optional[Any] = bbox[i, j, 3]
a__ :List[str] = bbox[i, j, 1]
a__ :str = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a__ :Any = bbox[i, j, 2]
a__ :int = bbox[i, j, 0]
a__ :Optional[Any] = t
a__ :int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ :List[Any] = None
if self.use_input_mask:
a__ :str = random_attention_mask([self.batch_size, self.text_seq_length] )
a__ :Optional[Any] = None
if self.use_token_type_ids:
a__ :str = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
a__ :List[str] = None
a__ :List[str] = None
if self.use_labels:
a__ :str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ :List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
a__ :Tuple = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _snake_case ( self : Tuple , __A : Any , __A : Union[str, Any] , __A : List[str] , __A : Dict , __A : int , __A : Union[str, Any] , __A : Union[str, Any] , __A : Any ) ->Dict:
"""simple docstring"""
a__ :Optional[int] = LayoutLMvaModel(config=__A )
model.to(__A )
model.eval()
# text + image
a__ :List[Any] = model(__A , pixel_values=__A )
a__ :int = model(
__A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A )
a__ :Union[str, Any] = model(__A , bbox=__A , pixel_values=__A , token_type_ids=__A )
a__ :Optional[Any] = model(__A , bbox=__A , pixel_values=__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
a__ :Dict = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
a__ :Dict = model(pixel_values=__A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _snake_case ( self : Tuple , __A : List[str] , __A : str , __A : Union[str, Any] , __A : str , __A : Any , __A : List[Any] , __A : str , __A : Tuple ) ->Tuple:
"""simple docstring"""
a__ :Optional[Any] = self.num_labels
a__ :Tuple = LayoutLMvaForSequenceClassification(__A )
model.to(__A )
model.eval()
a__ :str = model(
__A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : Optional[int] , __A : str , __A : Tuple , __A : Union[str, Any] , __A : Union[str, Any] , __A : Dict , __A : int , __A : Optional[int] , __A : int ) ->List[str]:
"""simple docstring"""
a__ :Dict = self.num_labels
a__ :Dict = LayoutLMvaForTokenClassification(config=__A )
model.to(__A )
model.eval()
a__ :Tuple = model(
__A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _snake_case ( self : str , __A : Optional[Any] , __A : Optional[Any] , __A : List[str] , __A : Union[str, Any] , __A : int , __A : Optional[int] , __A : Union[str, Any] , __A : str ) ->Dict:
"""simple docstring"""
a__ :List[str] = LayoutLMvaForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
a__ :List[str] = model(
__A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self) -> Dict:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"pixel_values": pixel_values,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( _a ,_a ,unittest.TestCase):
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCamelCase_ = (
{'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _snake_case ( self : List[str] , __A : Union[str, Any] , __A : Optional[Any] , __A : Optional[int] , __A : List[str] , __A : Dict ) ->Dict:
"""simple docstring"""
return True
def _snake_case ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
a__ :int = LayoutLMvaModelTester(self )
a__ :Union[str, Any] = ConfigTester(self , config_class=__A , hidden_size=37 )
def _snake_case ( self : int , __A : int , __A : List[Any] , __A : Optional[int]=False ) ->Optional[Any]:
"""simple docstring"""
a__ :Union[str, Any] = copy.deepcopy(__A )
if model_class in get_values(__A ):
a__ :Dict = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(__A , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__A ):
a__ :List[str] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__A )
elif model_class in get_values(__A ):
a__ :int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
a__ :Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
elif model_class in [
*get_values(__A ),
]:
a__ :List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
elif model_class in [
*get_values(__A ),
]:
a__ :List[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__A , )
return inputs_dict
def _snake_case ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _snake_case ( self : List[Any] ) ->List[Any]:
"""simple docstring"""
a__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _snake_case ( self : int ) ->Optional[Any]:
"""simple docstring"""
a__ :str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a__ :List[Any] = type
self.model_tester.create_and_check_model(*__A )
def _snake_case ( self : Tuple ) ->str:
"""simple docstring"""
a__ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def _snake_case ( self : List[Any] ) ->List[str]:
"""simple docstring"""
a__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
def _snake_case ( self : Optional[int] ) ->Dict:
"""simple docstring"""
a__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
@slow
def _snake_case ( self : Union[str, Any] ) ->str:
"""simple docstring"""
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ :int = LayoutLMvaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def prepare_img():
    """Load the fixture image used by the integration test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class lowerCAmelCase_ ( unittest.TestCase):
@cached_property
    def default_image_processor(self):
        """simple docstring"""
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
@slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
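    # Added note on the shape: with layoutlmv3-base's 224x224 input and 16x16 patches,
    # the visual stream contributes (224 // 16) ** 2 = 196 patch tokens plus 1 CLS token,
    # and the 2 text tokens above bring the total to 2 + 196 + 1 = 199 positions of
    # hidden size 768 -- hence torch.Size((1, 199, 768)).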
| 395 | 0 |
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
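    # Quick sanity check (added; not part of the original solution): the four
    # non-trivial two-digit digit-cancelling fractions multiply to 1/100.
    assert set(fraction_list(2)) == {"16/64", "19/95", "26/65", "49/98"}
    assert solution() == 100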
| 83 | import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Union[str, Any]:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = relative_attention
_A = position_biased_input
_A = pos_att_type
_A = scope
def UpperCAmelCase ( self ) -> Dict:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self ) -> Optional[int]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_A = DebertaVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_A = DebertaVaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_A = self.num_labels
_A = DebertaVaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = self.num_labels
_A = DebertaVaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_A = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = DebertaVaForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :int = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase :str = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase :str = True
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :Optional[int] = False
lowerCamelCase :List[str] = False
lowerCamelCase :str = False
def UpperCAmelCase ( self ) -> Optional[int]:
_A = DebertaVaModelTester(self )
_A = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> List[str]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Any:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ )
@slow
def UpperCAmelCase ( self ) -> Any:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = DebertaVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def UpperCAmelCase ( self ) -> int:
pass
    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4), F'''{output[:, 1:4, 1:4]}''' )
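    # Added note: the slice comparison above starts at index 1 rather than 0,
    # presumably because the first position is masked out (attention_mask begins
    # with 0), so its hidden state is not a meaningful value to pin down.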
| 83 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__UpperCAmelCase = (PNDMScheduler,)
__UpperCAmelCase = (("""num_inference_steps""", 50),)
    def get_scheduler_config(self, **kwargs):
        '''simple docstring'''
        config = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
        }

        config.update(**kwargs)
        return config
def __magic_name__ ( self : Optional[int] , snake_case_ : int=0 , **snake_case_ : Tuple ):
'''simple docstring'''
snake_case__ : str = dict(self.forward_default_kwargs )
snake_case__ : int = kwargs.pop('''num_inference_steps''' , snake_case_ )
snake_case__ : Optional[int] = self.dummy_sample
snake_case__ : Dict = 0.1 * sample
snake_case__ : int = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
snake_case__ : List[Any] = self.get_scheduler_config(**snake_case_ )
snake_case__ : List[Any] = scheduler_class(**snake_case_ )
scheduler.set_timesteps(snake_case_ )
# copy over dummy past residuals
snake_case__ : int = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case_ )
snake_case__ : Union[str, Any] = scheduler_class.from_pretrained(snake_case_ )
new_scheduler.set_timesteps(snake_case_ )
# copy over dummy past residuals
snake_case__ : List[str] = dummy_past_residuals[:]
snake_case__ : Any = scheduler.step_prk(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
snake_case__ : List[str] = new_scheduler.step_prk(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
snake_case__ : Union[str, Any] = scheduler.step_plms(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
snake_case__ : Any = new_scheduler.step_plms(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
pass
def __magic_name__ ( self : str , snake_case_ : str=0 , **snake_case_ : str ):
'''simple docstring'''
snake_case__ : List[Any] = dict(self.forward_default_kwargs )
snake_case__ : Optional[int] = kwargs.pop('''num_inference_steps''' , snake_case_ )
snake_case__ : Optional[int] = self.dummy_sample
snake_case__ : int = 0.1 * sample
snake_case__ : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
snake_case__ : Tuple = self.get_scheduler_config()
snake_case__ : str = scheduler_class(**snake_case_ )
scheduler.set_timesteps(snake_case_ )
# copy over dummy past residuals (must be after setting timesteps)
snake_case__ : Optional[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case_ )
snake_case__ : Tuple = scheduler_class.from_pretrained(snake_case_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case_ )
# copy over dummy past residual (must be after setting timesteps)
snake_case__ : str = dummy_past_residuals[:]
snake_case__ : Union[str, Any] = scheduler.step_prk(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
snake_case__ : Optional[int] = new_scheduler.step_prk(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
snake_case__ : Dict = scheduler.step_plms(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
snake_case__ : List[str] = new_scheduler.step_plms(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __magic_name__ ( self : Tuple , **snake_case_ : List[Any] ):
'''simple docstring'''
snake_case__ : Any = self.scheduler_classes[0]
snake_case__ : Tuple = self.get_scheduler_config(**snake_case_ )
snake_case__ : Any = scheduler_class(**snake_case_ )
snake_case__ : Optional[Any] = 1_0
snake_case__ : Optional[Any] = self.dummy_model()
snake_case__ : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(snake_case_ )
for i, t in enumerate(scheduler.prk_timesteps ):
snake_case__ : List[Any] = model(snake_case_ , snake_case_ )
snake_case__ : str = scheduler.step_prk(snake_case_ , snake_case_ , snake_case_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
snake_case__ : Optional[Any] = model(snake_case_ , snake_case_ )
snake_case__ : List[str] = scheduler.step_plms(snake_case_ , snake_case_ , snake_case_ ).prev_sample
return sample
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
snake_case__ : Dict = dict(self.forward_default_kwargs )
snake_case__ : Optional[int] = kwargs.pop('''num_inference_steps''' , snake_case_ )
for scheduler_class in self.scheduler_classes:
snake_case__ : Optional[Any] = self.get_scheduler_config()
snake_case__ : Dict = scheduler_class(**snake_case_ )
snake_case__ : List[Any] = self.dummy_sample
snake_case__ : int = 0.1 * sample
if num_inference_steps is not None and hasattr(snake_case_ , '''set_timesteps''' ):
scheduler.set_timesteps(snake_case_ )
elif num_inference_steps is not None and not hasattr(snake_case_ , '''set_timesteps''' ):
snake_case__ : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case__ : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
snake_case__ : Union[str, Any] = dummy_past_residuals[:]
            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=snake_case_ )
snake_case__ : str = self.scheduler_classes[0]
snake_case__ : List[Any] = self.get_scheduler_config(steps_offset=1 )
snake_case__ : Any = scheduler_class(**snake_case_ )
scheduler.set_timesteps(1_0 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) , )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] , [0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=snake_case_ , beta_end=snake_case_ )
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case_ )
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case_ )
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
for t in [1, 5, 1_0]:
self.check_over_forward(time_step=snake_case_ )
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=snake_case_ )
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
snake_case__ : Optional[int] = 2_7
for scheduler_class in self.scheduler_classes:
snake_case__ : Union[str, Any] = self.dummy_sample
snake_case__ : Optional[Any] = 0.1 * sample
snake_case__ : Any = self.get_scheduler_config()
snake_case__ : List[Any] = scheduler_class(**snake_case_ )
scheduler.set_timesteps(snake_case_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
snake_case__ : str = scheduler.step_prk(snake_case_ , snake_case_ , snake_case_ ).prev_sample
def __magic_name__ ( self : int ):
'''simple docstring'''
with self.assertRaises(snake_case_ ):
snake_case__ : Tuple = self.scheduler_classes[0]
snake_case__ : int = self.get_scheduler_config()
snake_case__ : Tuple = scheduler_class(**snake_case_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
snake_case__ : Optional[int] = self.full_loop()
snake_case__ : int = torch.sum(torch.abs(snake_case_ ) )
snake_case__ : Tuple = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def __magic_name__ ( self : int ):
'''simple docstring'''
snake_case__ : Tuple = self.full_loop(prediction_type='''v_prediction''' )
snake_case__ : List[Any] = torch.sum(torch.abs(snake_case_ ) )
snake_case__ : Any = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def __magic_name__ ( self : Dict ):
'''simple docstring'''
snake_case__ : str = self.full_loop(set_alpha_to_one=snake_case_ , beta_start=0.0_1 )
snake_case__ : Optional[Any] = torch.sum(torch.abs(snake_case_ ) )
snake_case__ : Tuple = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def __magic_name__ ( self : Any ):
'''simple docstring'''
snake_case__ : List[Any] = self.full_loop(set_alpha_to_one=snake_case_ , beta_start=0.0_1 )
snake_case__ : List[str] = torch.sum(torch.abs(snake_case_ ) )
snake_case__ : int = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
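    # Added note: PNDMScheduler runs a Runge-Kutta warm-up over scheduler.prk_timesteps
    # (step_prk) before switching to linear multistep updates over
    # scheduler.plms_timesteps (step_plms), which is why the full-loop helper above
    # iterates the two timestep lists separately.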
| 347 |
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_py3nvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_py3nvml_available():
    import py3nvml.py3nvml as nvml


logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """simple docstring"""

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    '''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    """simple docstring"""
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    """simple docstring"""

    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        '''simple docstring'''
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError('''Mixed precision is currently not supported.''' )

        has_model_class_in_config = (
            hasattr(config, '''architectures''')
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = '''TF''' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__('''transformers''', fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
                    ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, '''vocab_size''') else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' )

        if self.args.fp16:
            raise NotImplementedError('''Mixed precision is currently not supported.''' )

        has_model_class_in_config = (
            hasattr(config, '''architectures''')
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = '''TF''' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__('''transformers''', fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
                    ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, '''vocab_size''') else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' )
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(F"""Doesn't fit on GPU. {e}""" )
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            '''Note that TensorFlow allocates more memory than '''
            '''it might need to speed up computation. '''
            '''The memory reported here corresponds to the memory '''
            '''reported by `nvidia-smi`, which can vary depending '''
            '''on total available memory on the GPU that is used.''' )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            '''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
                            ''' consumption line by line.''' )
                    trace = start_memory_tracing('''transformers''' )

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        '''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
                        ''' with `args.memory=False`''' )
                elif self.args.is_gpu:
                    # gpu
                    if not is_py3nvml_available():
                        logger.warning(
                            '''py3nvml not installed, we won\'t log GPU memory usage. '''
                            '''Install py3nvml (pip install py3nvml) to log information about GPU.''' )
                        memory = '''N/A'''
                    else:
                        logger.info(
                            '''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
                            ''' running on the same GPU.''' )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            '''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
                            ''' TensorFlow.''' )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(F"""Doesn't fit on GPU. {e}""" )
                return "N/A", None
| 347 | 1 |
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


# Linear search used once the search window has become smaller than `precision`.
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 658 |
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the headline counters from worldometers' coronavirus page."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
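# Added caveat: this scraper is tied to worldometers' current markup (the h1 /
# maincounter-number and panel-title / number-table-main pairs); if the page
# layout changes, zip() will silently pair the wrong keys and values, so treat
# missing or implausible numbers as a signal to re-check the selectors.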
| 658 | 1 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"""Unable to determine file format from file extension {path}. """
        f"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == '''infer''' else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    """simple docstring"""

    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("""run""", help="""Run a pipeline through the CLI""")
        run_parser.add_argument("""--task""", choices=get_supported_tasks(), help="""Task to run""")
        run_parser.add_argument("""--input""", type=str, help="""Path to the file to use for inference""")
        run_parser.add_argument("""--output""", type=str, help="""Path to the file that will be used post to write results.""")
        run_parser.add_argument("""--model""", type=str, help="""Name or path to the model to instantiate.""")
        run_parser.add_argument("""--config""", type=str, help="""Name or path to the model\'s config to instantiate.""")
        run_parser.add_argument(
            """--tokenizer""", type=str, help="""Name of the tokenizer to use. (default: same as the model name)""")
        run_parser.add_argument(
            """--column""", type=str, help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""", )
        run_parser.add_argument(
            """--format""", type=str, default="""infer""", choices=PipelineDataFormat.SUPPORTED_FORMATS, help="""Input format to read from""", )
        run_parser.add_argument(
            """--device""", type=int, default=-1, help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""", )
        run_parser.add_argument("""--overwrite""", action="""store_true""", help="""Allow overwriting the output file.""")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""")
        else:
            self._reader.save(outputs)
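# Example invocation (added for illustration; the flags mirror the parser above,
# and the file names are placeholders):
#
#     transformers-cli run --task text-classification \
#         --input reviews.csv --column text \
#         --output predictions.csv --format csv --device -1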
| 587 |
def solution(limit: int = 1000000) -> int:
    """simple docstring"""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(F'''{solution() = }''')
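    # Worked micro-example (added; not in the original file): for limit = 10 the sieve
    # leaves primes {2, 3, 5, 7}, and the products give phi(2..10) = 1, 2, 2, 4, 2, 6, 4, 6, 4,
    # which sums to 31 -- so the line below should print (about) 31; float rounding is
    # why the total is converted with int() rather than compared exactly.
    print(F'''{solution(10) = }''')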
| 315 | 0 |
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = """./model_checkpoints/vqgan_only.yaml"""
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = """./model_checkpoints/vqgan_only.pt"""
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["""state_dict"""]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    # VQModel.encode returns (quantized latents, embedding loss, info)
    z, _, _ = model.encode(x)
    print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(""".""", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("""Expected key `target` to instantiate.""")
    return get_obj_from_str(config["""target"""])(**config.get("""params""", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the checkpoint (if any) and report its global step
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="""cpu""")
        global_step = pl_sd["""global_step"""]
        print(F"""loaded model from global step {global_step}.""")
    else:
        pl_sd = {"""state_dict""": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["""state_dict"""], gpu=gpu, eval_mode=eval_mode)["""model"""]
    return model, global_step
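# Hypothetical usage sketch (added; the paths and device are placeholder
# assumptions, not from the original file):
#
#     vqgan = load_vqgan("cuda", conf_path="./model_checkpoints/vqgan_only.yaml",
#                        ckpt_path="./model_checkpoints/vqgan_only.pt")
#     x = torch.randn(1, 3, 256, 256, device="cuda")  # stand-in for an image batch
#     x_rec = reconstruct_with_vqgan(x, vqgan)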
| 707 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}

    url = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]})
        pages_to_iterate_over = math.ceil((result["""total_count"""] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + F"""&page={i + 2}""", headers=headers).json()
            job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]})

        return job_links
    except Exception:
        print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""")

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run"""
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}

    url = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"""
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]})
        pages_to_iterate_over = math.ceil((result["""total_count"""] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + F"""&page={i + 2}""", headers=headers).json()
            artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]})

        return artifacts
    except Exception:
        print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Action artifact (the API URL only redirects to the real download location)."""
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["""Location"""]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, F"""{artifact_name}.zip""")
    with open(file_path, """wb""") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)"""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("""UTF-8""").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(""": """)]
                                    error = line[line.index(""": """) + len(""": """) :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("""FAILED """):
                                # `test` is the test method that failed
                                test = line[len("""FAILED """) :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            F"""`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` """
            F"""and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
            """ problem.""" )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files"""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(""".zip""")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """count each error"""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
lowerCAmelCase : str = test.split("""/""" )[2]
else:
lowerCAmelCase : List[Any] = None
return test
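# Illustrative sketch of the same idea with explicit names (the helper below is
# ours): pytest node ids look like
# "tests/models/<model>/test_modeling_<model>.py::Class::test", so the model
# folder is the third "/"-separated component of the file path.
def _sketch_model_from_test(test_id):
    path = test_id.split("::")[0]
    return path.split("/")[2] if path.startswith("tests/models/") else None

assert _sketch_model_from_test("tests/models/bert/test_modeling_bert.py::T::t") == "bert"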
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
lowerCAmelCase : List[Any] = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowerCAmelCase : int = [x for x in logs if x[2] is not None]
lowerCAmelCase : Optional[Any] = {x[2] for x in logs}
lowerCAmelCase : Dict = {}
for test in tests:
lowerCAmelCase : Optional[int] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowerCAmelCase : Tuple = counter.most_common()
lowerCAmelCase : Union[str, Any] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowerCAmelCase : List[Any] = sum(error_counts.values() )
if n_errors > 0:
lowerCAmelCase : Optional[int] = {"""count""": n_errors, """errors""": error_counts}
lowerCAmelCase : Any = dict(sorted(r.items() ,key=lambda item : item[1]["count"] ,reverse=SCREAMING_SNAKE_CASE__ ) )
return r
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = """| no. | error | status |"""
lowerCAmelCase : List[Any] = """|-:|:-|:-|"""
lowerCAmelCase : Union[str, Any] = [header, sep]
for error in reduced_by_error:
lowerCAmelCase : List[str] = reduced_by_error[error]["""count"""]
lowerCAmelCase : Any = F"""| {count} | {error[:1_0_0]} | |"""
lines.append(SCREAMING_SNAKE_CASE__ )
return "\n".join(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
lowerCAmelCase : str = """| model | no. of errors | major error | count |"""
lowerCAmelCase : Any = """|-:|-:|-:|-:|"""
lowerCAmelCase : str = [header, sep]
for model in reduced_by_model:
lowerCAmelCase : Any = reduced_by_model[model]["""count"""]
lowerCAmelCase , lowerCAmelCase : Optional[int] = list(reduced_by_model[model]["""errors"""].items() )[0]
lowerCAmelCase : Optional[Any] = F"""| {model} | {count} | {error[:6_0]} | {_count} |"""
lines.append(SCREAMING_SNAKE_CASE__ )
return "\n".join(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
lowerCAmelCase : Dict =parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowerCAmelCase : Optional[int] =get_job_links(args.workflow_run_id, token=args.token)
lowerCAmelCase : List[Any] ={}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowerCAmelCase : str =k.find(' / ')
lowerCAmelCase : Any =k[index + len(' / ') :]
lowerCAmelCase : str =v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowerCAmelCase : Any =get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowerCAmelCase : List[Any] =get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowerCAmelCase : str =Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowerCAmelCase : int =counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowerCAmelCase : Optional[int] =reduce_by_error(errors)
lowerCAmelCase : Tuple =reduce_by_model(errors)
error_table : Optional[Any] =make_github_table(reduced_by_error)
model_table : Union[str, Any] =make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(error_table)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(model_table)
| 693 | 0 |
def snake_case (UpperCAmelCase__ ) -> bool:
if p < 2:
raise ValueError('p should not be less than 2!' )
elif p == 2:
return True
UpperCamelCase_: Tuple = 4
UpperCamelCase_: str = (1 << p) - 1
for _ in range(p - 2 ):
UpperCamelCase_: Union[str, Any] = ((s * s) - 2) % m
return s == 0
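# Self-contained sketch of the same recurrence (the helper name is ours, since
# the def above is obfuscated): with s_0 = 4 and
# s_{k+1} = (s_k**2 - 2) mod (2**p - 1), the Mersenne number 2**p - 1 is prime
# iff s_{p-2} == 0, for odd prime p; p == 2 is special-cased to True.
def _sketch_lucas_lehmer(p):
    if p == 2:
        return True
    s, m = 4, (1 << p) - 1
    for _ in range(p - 2):
        s = (s * s - 2) % m
    return s == 0

# Mersenne-prime exponents below 20 are 2, 3, 5, 7, 13, 17, 19 (11 fails: 2047 = 23 * 89).
assert [p for p in (2, 3, 5, 7, 11, 13, 17, 19) if _sketch_lucas_lehmer(p)] == [2, 3, 5, 7, 13, 17, 19]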
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 57 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A : Optional[int] = logging.get_logger(__name__)
__A : Optional[int] = {"vocab_file": "spiece.model"}
__A : List[Any] = {
"vocab_file": {
"TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
}
}
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : Optional[int]="<sep>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[int]="<cls>" , UpperCamelCase__ : List[str]="<mask>" , UpperCamelCase__ : Optional[Any]=["<eop>", "<eod>"] , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Dict , ):
A__ : List[str] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
A__ : Tuple ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
A__ : Dict =3
A__ : int =do_lower_case
A__ : str =remove_space
A__ : Optional[Any] =keep_accents
A__ : int =vocab_file
A__ : Dict =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
"See https://pypi.org/project/jieba/ for installation." )
A__ : Union[str, Any] =jieba
A__ : List[str] =str.maketrans(" \n" , "\u2582\u2583" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _UpperCAmelCase ( self : Union[str, Any] ):
return len(self.sp_model )
def _UpperCAmelCase ( self : Optional[int] ):
A__ : Any ={self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ):
A__ : Union[str, Any] =self.__dict__.copy()
A__ : Tuple =None
return state
def __setstate__( self : Tuple , UpperCamelCase__ : int ):
A__ : Union[str, Any] =d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
A__ : Optional[int] ={}
A__ : Union[str, Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Dict ):
if self.remove_space:
A__ : Optional[int] =" ".join(inputs.strip().split() )
else:
A__ : Optional[Any] =inputs
A__ : Any =outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
A__ : Optional[Any] =unicodedata.normalize("NFKD" , UpperCamelCase__ )
A__ : Tuple ="".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] )
if self.do_lower_case:
A__ : str =outputs.lower()
return outputs
def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : str ):
A__ : Optional[int] =self.preprocess_text(UpperCamelCase__ )
A__ : Dict =self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
A__ : List[str] =[]
for piece in pieces:
if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
A__ : str =self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A__ : Union[str, Any] =cur_pieces[1:]
else:
A__ : List[str] =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCamelCase__ )
else:
new_pieces.append(UpperCamelCase__ )
return new_pieces
def _UpperCAmelCase ( self : int , UpperCamelCase__ : str ):
return self.sp_model.PieceToId(UpperCamelCase__ )
def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[Any] ):
return self.sp_model.IdToPiece(UpperCamelCase__ )
def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : str ):
A__ : Optional[int] ="".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip()
return out_string
def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
A__ : List[str] =[self.sep_token_id]
A__ : str =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1]
return ([0] * len(UpperCamelCase__ )) + [1, 1]
def _UpperCAmelCase ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
A__ : List[str] =[self.sep_token_id]
A__ : Optional[Any] =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
A__ : Tuple =os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
A__ : Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
def _UpperCAmelCase ( self : str , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int ):
A__ : List[Any] =super()._decode(*UpperCamelCase__ , **UpperCamelCase__ )
A__ : Union[str, Any] =text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
return text
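# Round-trip sketch of the whitespace encoding this tokenizer uses: the
# translation table built in __init__ maps " " and "\n" to the placeholder
# glyphs "\u2582" and "\u2583" (in the upstream CpmTokenizer it is applied to
# jieba-cut segments before SentencePiece runs), and _decode above maps them
# back after dropping the plain spaces SentencePiece reinserts.
_sketch_table = str.maketrans(" \n", "\u2582\u2583")
_sketch_text = "你好 世界\n"
_sketch_encoded = _sketch_text.translate(_sketch_table)
assert _sketch_encoded.replace("\u2582", " ").replace("\u2583", "\n") == _sketch_text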
| 656 | 0 |
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def a ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str]="attention" ):
'''simple docstring'''
__UpperCAmelCase : int = params[f'{prefix}/layers_{i}/{layer_name}/key/kernel']
__UpperCAmelCase : Dict = params[f'{prefix}/layers_{i}/{layer_name}/out/kernel']
__UpperCAmelCase : str = params[f'{prefix}/layers_{i}/{layer_name}/query/kernel']
__UpperCAmelCase : str = params[f'{prefix}/layers_{i}/{layer_name}/value/kernel']
return k, o, q, v
def a ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=False ):
'''simple docstring'''
if split_mlp_wi:
__UpperCAmelCase : Dict = params[f'{prefix}/layers_{i}/mlp/wi_0/kernel']
__UpperCAmelCase : Optional[Any] = params[f'{prefix}/layers_{i}/mlp/wi_1/kernel']
__UpperCAmelCase : Optional[Any] = (wi_a, wi_a)
else:
__UpperCAmelCase : List[Any] = params[f'{prefix}/layers_{i}/mlp/wi/kernel']
__UpperCAmelCase : Dict = params[f'{prefix}/layers_{i}/mlp/wo/kernel']
return wi, wo
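# Sketch of the flattened naming the lookups above rely on:
# traverse_util.flatten_dict turns the nested T5X parameter tree into tuple
# keys, which the converter joins with "/". A dependency-free mini version
# (the helper name is ours):
def _sketch_flatten(tree, prefix=()):
    flat = {}
    for key, value in tree.items():
        path = prefix + (key,)
        if isinstance(value, dict):
            flat.update(_sketch_flatten(value, path))
        else:
            flat["/".join(path)] = value
    return flat

assert _sketch_flatten({"encoder": {"layers_0": {"mlp": {"wi": "W"}}}}) == {"encoder/layers_0/mlp/wi": "W"}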
def a ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
return params[f'{prefix}/layers_{i}/{layer_name}/scale']
def a ( _UpperCAmelCase : dict , *, _UpperCAmelCase : int , _UpperCAmelCase : bool ):
'''simple docstring'''
__UpperCAmelCase : Tuple = traverse_util.flatten_dict(variables['''target'''] )
__UpperCAmelCase : Union[str, Any] = {'''/'''.join(_UpperCAmelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__UpperCAmelCase : Optional[int] = '''encoder/layers_0/mlp/wi_0/kernel''' in old
print('''Split MLP:''' , _UpperCAmelCase )
__UpperCAmelCase : List[str] = collections.OrderedDict()
# Shared embeddings.
__UpperCAmelCase : str = old['''token_embedder/embedding''']
# Encoder.
for i in range(_UpperCAmelCase ):
# Block i, layer 0 (Self Attention).
__UpperCAmelCase : Tuple = tax_layer_norm_lookup(_UpperCAmelCase , _UpperCAmelCase , '''encoder''' , '''pre_attention_layer_norm''' )
__UpperCAmelCase : List[Any] = tax_attention_lookup(_UpperCAmelCase , _UpperCAmelCase , '''encoder''' , '''attention''' )
__UpperCAmelCase : Optional[int] = layer_norm
__UpperCAmelCase : str = k.T
__UpperCAmelCase : Dict = o.T
__UpperCAmelCase : Tuple = q.T
__UpperCAmelCase : List[str] = v.T
# Block i, layer 1 (MLP).
__UpperCAmelCase : str = tax_layer_norm_lookup(_UpperCAmelCase , _UpperCAmelCase , '''encoder''' , '''pre_mlp_layer_norm''' )
__UpperCAmelCase : Dict = tax_mlp_lookup(_UpperCAmelCase , _UpperCAmelCase , '''encoder''' , _UpperCAmelCase )
__UpperCAmelCase : str = layer_norm
if split_mlp_wi:
__UpperCAmelCase : List[str] = wi[0].T
__UpperCAmelCase : Dict = wi[1].T
else:
__UpperCAmelCase : Any = wi.T
__UpperCAmelCase : Tuple = wo.T
__UpperCAmelCase : Union[str, Any] = old[
'''encoder/relpos_bias/rel_embedding'''
].T
__UpperCAmelCase : Any = old['''encoder/encoder_norm/scale''']
if not is_encoder_only:
# Decoder.
for i in range(_UpperCAmelCase ):
# Block i, layer 0 (Self Attention).
__UpperCAmelCase : int = tax_layer_norm_lookup(_UpperCAmelCase , _UpperCAmelCase , '''decoder''' , '''pre_self_attention_layer_norm''' )
__UpperCAmelCase : Optional[Any] = tax_attention_lookup(_UpperCAmelCase , _UpperCAmelCase , '''decoder''' , '''self_attention''' )
__UpperCAmelCase : Dict = layer_norm
__UpperCAmelCase : int = k.T
__UpperCAmelCase : Dict = o.T
__UpperCAmelCase : Dict = q.T
__UpperCAmelCase : List[str] = v.T
# Block i, layer 1 (Cross Attention).
__UpperCAmelCase : List[Any] = tax_layer_norm_lookup(_UpperCAmelCase , _UpperCAmelCase , '''decoder''' , '''pre_cross_attention_layer_norm''' )
__UpperCAmelCase : Union[str, Any] = tax_attention_lookup(_UpperCAmelCase , _UpperCAmelCase , '''decoder''' , '''encoder_decoder_attention''' )
__UpperCAmelCase : List[str] = layer_norm
__UpperCAmelCase : str = k.T
__UpperCAmelCase : Dict = o.T
__UpperCAmelCase : List[str] = q.T
__UpperCAmelCase : List[str] = v.T
# Block i, layer 2 (MLP).
__UpperCAmelCase : List[Any] = tax_layer_norm_lookup(_UpperCAmelCase , _UpperCAmelCase , '''decoder''' , '''pre_mlp_layer_norm''' )
__UpperCAmelCase : Dict = tax_mlp_lookup(_UpperCAmelCase , _UpperCAmelCase , '''decoder''' , _UpperCAmelCase )
__UpperCAmelCase : Tuple = layer_norm
if split_mlp_wi:
__UpperCAmelCase : Tuple = wi[0].T
__UpperCAmelCase : List[Any] = wi[1].T
else:
__UpperCAmelCase : Tuple = wi.T
__UpperCAmelCase : Union[str, Any] = wo.T
__UpperCAmelCase : Tuple = old['''decoder/decoder_norm/scale''']
__UpperCAmelCase : Optional[int] = old[
'''decoder/relpos_bias/rel_embedding'''
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__UpperCAmelCase : List[str] = old['''decoder/logits_dense/kernel'''].T
return new
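# Why every kernel above is transposed with .T: Flax dense kernels are stored
# as [in_features, out_features], while torch.nn.Linear.weight expects
# [out_features, in_features]. A minimal numerical check (torch is already
# imported at the top of this script; the names below are ours):
_sketch_kernel = torch.arange(12.0).reshape(4, 3)   # Flax layout: [in, out]
_sketch_x = torch.ones(1, 4)
assert torch.allclose(
    _sketch_x @ _sketch_kernel,                               # Flax: x @ kernel
    torch.nn.functional.linear(_sketch_x, _sketch_kernel.T),  # torch: x @ weight.T
)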
def a ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : bool ):
'''simple docstring'''
__UpperCAmelCase : str = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__UpperCAmelCase : List[str] = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__UpperCAmelCase : str = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('''Using shared word embeddings as lm_head.''' )
__UpperCAmelCase : Optional[Any] = state_dict['''shared.weight''']
return state_dict
def a ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = checkpoints.load_tax_checkpoint(_UpperCAmelCase )
__UpperCAmelCase : int = convert_tax_to_pytorch(_UpperCAmelCase , num_layers=config.num_layers , is_encoder_only=_UpperCAmelCase )
__UpperCAmelCase : Any = make_state_dict(_UpperCAmelCase , _UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
def a ( _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : bool = False ):
'''simple docstring'''
__UpperCAmelCase : Dict = TaConfig.from_json_file(_UpperCAmelCase )
print(f'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__UpperCAmelCase : str = TaEncoderModel(_UpperCAmelCase )
else:
__UpperCAmelCase : Optional[Any] = TaForConditionalGeneration(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(_UpperCAmelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(_UpperCAmelCase )
print('''Done''' )
if __name__ == "__main__":
__A =argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
__A =parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 701 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
__A ={
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 6_5_5_3_6,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_8_0_0_0,
"sample_size": 1_3_1_0_7_2,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_6_0_0_0,
"sample_size": 6_5_5_3_6,
},
}
def a ( _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] ):
'''simple docstring'''
return torch.atana(_UpperCAmelCase , _UpperCAmelCase ) / math.pi * 2
def a ( _UpperCAmelCase : Any ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = torch.sin(t * math.pi / 2 ) ** 2
__UpperCAmelCase : List[str] = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(_UpperCAmelCase , _UpperCAmelCase )
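# Math sketch behind the two helpers above (the mangled torch.atana corresponds
# to torch.atan2): a point (alpha, sigma) on the unit circle maps to
# t = atan2(sigma, alpha) / (pi / 2), so t lies in [0, 1], and the "crash"
# schedule picks sigma = sin(t * pi / 2)**2 with alpha = sqrt(1 - sigma**2) so
# the pair stays on the circle. Self-contained check (math is imported at the
# top of this script; the names below are ours):
_sketch_t = 0.3
_sketch_sigma = math.sin(_sketch_t * math.pi / 2) ** 2
_sketch_alpha = (1 - _sketch_sigma**2) ** 0.5
assert abs(_sketch_alpha**2 + _sketch_sigma**2 - 1.0) < 1e-9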
class UpperCAmelCase__ ( __UpperCamelCase ):
'''simple docstring'''
pass
class UpperCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , a_ : List[Any] ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase : Any = DiffusionAttnUnetaD(a_ , n_attn_layers=4 )
__UpperCAmelCase : Dict = deepcopy(self.diffusion )
__UpperCAmelCase : List[Any] = torch.quasirandom.SobolEngine(1 , scramble=a_ )
def a ( _UpperCAmelCase : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Dict = MODELS_MAP[model_name]['''url''']
os.system(f'wget {url} ./' )
return f'./{model_name}.ckpt'
__A ={
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
__A ={
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
__A ={
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
__A ={
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
__A ={
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
__A ={
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def a ( _UpperCAmelCase : int ):
'''simple docstring'''
if name.startswith('''skip''' ):
return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
# name has to be of format main.{digit}
if not name.startswith('''main.''' ):
raise ValueError(f'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def a ( _UpperCAmelCase : List[str] ):
'''simple docstring'''
for key, value in ATTN_MAP.items():
if name.startswith(_UpperCAmelCase ) and not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return name.replace(_UpperCAmelCase , _UpperCAmelCase )
elif name.startswith(_UpperCAmelCase ):
return [name.replace(_UpperCAmelCase , _UpperCAmelCase ) for v in value]
raise ValueError(f'Attn error with {name}' )
def a ( _UpperCAmelCase : Any , _UpperCAmelCase : Any=13 ):
'''simple docstring'''
__UpperCAmelCase : str = input_string
if string.split('''.''' )[0] == "timestep_embed":
return string.replace('''timestep_embed''' , '''time_proj''' )
__UpperCAmelCase : Optional[Any] = 0
if string.startswith('''net.3.''' ):
depth += 1
__UpperCAmelCase : Optional[int] = string[6:]
elif string.startswith('''net.''' ):
__UpperCAmelCase : Optional[int] = string[4:]
while string.startswith('''main.7.''' ):
depth += 1
__UpperCAmelCase : Optional[Any] = string[7:]
if string.startswith('''main.''' ):
__UpperCAmelCase : List[Any] = string[5:]
# mid block
if string[:2].isdigit():
__UpperCAmelCase : str = string[:2]
__UpperCAmelCase : List[str] = string[2:]
else:
__UpperCAmelCase : Tuple = string[0]
__UpperCAmelCase : Optional[Any] = string[1:]
if depth == max_depth:
__UpperCAmelCase : Tuple = MID_NUM_TO_LAYER[layer_num]
__UpperCAmelCase : Optional[Any] = '''mid_block'''
elif depth > 0 and int(_UpperCAmelCase ) < 7:
__UpperCAmelCase : Union[str, Any] = DOWN_NUM_TO_LAYER[layer_num]
__UpperCAmelCase : List[str] = f'down_blocks.{depth}'
elif depth > 0 and int(_UpperCAmelCase ) > 7:
__UpperCAmelCase : List[Any] = UP_NUM_TO_LAYER[layer_num]
__UpperCAmelCase : str = f'up_blocks.{max_depth - depth - 1}'
elif depth == 0:
__UpperCAmelCase : Optional[int] = DEPTH_0_TO_LAYER[layer_num]
__UpperCAmelCase : Dict = f'up_blocks.{max_depth - 1}' if int(_UpperCAmelCase ) > 3 else '''down_blocks.0'''
if not string_left.startswith('''.''' ):
raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )
__UpperCAmelCase : str = string_left[1:]
if "resnets" in new_layer:
__UpperCAmelCase : Optional[int] = convert_resconv_naming(_UpperCAmelCase )
elif "attentions" in new_layer:
__UpperCAmelCase : Any = convert_attn_naming(_UpperCAmelCase )
__UpperCAmelCase : Optional[Any] = new_string_left
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__UpperCAmelCase : Tuple = prefix + '''.''' + new_layer + '''.''' + string_left
else:
__UpperCAmelCase : Optional[int] = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
return new_string
def a ( _UpperCAmelCase : Dict ):
'''simple docstring'''
__UpperCAmelCase : int = {}
for k, v in state_dict.items():
if k.endswith('''kernel''' ):
# up- and downsample layers don't have trainable weights
continue
__UpperCAmelCase : Any = rename(_UpperCAmelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__UpperCAmelCase : str = transform_conv_attns(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
__UpperCAmelCase : List[Any] = v
return new_state_dict
def a ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ):
'''simple docstring'''
if len(_UpperCAmelCase ) == 1:
if len(v.shape ) == 3:
# weight
__UpperCAmelCase : Tuple = v[:, :, 0]
else:
# bias
__UpperCAmelCase : List[str] = v
else:
# qkv matrices
__UpperCAmelCase : Union[str, Any] = v.shape[0]
__UpperCAmelCase : List[Any] = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
__UpperCAmelCase : Dict = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
__UpperCAmelCase : str = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
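# Why the [:, :, 0] slices above are safe: a Conv1d with kernel_size == 1 is
# pointwise, so its [out, in, 1] weight squeezes to the [out, in] layout of
# nn.Linear without changing the computation. Minimal check (torch is already
# imported at the top of this script; the names below are ours):
_sketch_w = torch.randn(8, 4, 1)    # Conv1d weight: [out_channels, in_channels, kernel]
_sketch_x = torch.randn(2, 4, 5)    # [batch, channels, length]
_conv = torch.nn.functional.conv1d(_sketch_x, _sketch_w)
_lin = torch.nn.functional.linear(_sketch_x.transpose(1, 2), _sketch_w[:, :, 0]).transpose(1, 2)
assert torch.allclose(_conv, _lin, atol=1e-6)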
def a ( _UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : str = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
__UpperCAmelCase : Optional[Any] = args.model_path.split('''/''' )[-1].split('''.''' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
__UpperCAmelCase : Any = download(_UpperCAmelCase )
__UpperCAmelCase : Any = MODELS_MAP[model_name]['''sample_rate''']
__UpperCAmelCase : Tuple = MODELS_MAP[model_name]['''sample_size''']
__UpperCAmelCase : Optional[Any] = Object()
__UpperCAmelCase : List[str] = sample_size
__UpperCAmelCase : List[str] = sample_rate
__UpperCAmelCase : Optional[Any] = 0
__UpperCAmelCase : int = UNetaDModel(sample_size=_UpperCAmelCase , sample_rate=_UpperCAmelCase )
__UpperCAmelCase : Optional[Any] = diffusers_model.state_dict()
__UpperCAmelCase : List[Any] = DiffusionUncond(_UpperCAmelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=_UpperCAmelCase )['''state_dict'''] )
__UpperCAmelCase : int = orig_model.diffusion_ema.eval()
__UpperCAmelCase : List[Any] = orig_model.state_dict()
__UpperCAmelCase : List[str] = rename_orig_weights(_UpperCAmelCase )
__UpperCAmelCase : Any = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
__UpperCAmelCase : Any = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(_UpperCAmelCase ) == 0, f'Problem with {renamed_minus_diffusers}'
assert all(k.endswith('''kernel''' ) for k in list(_UpperCAmelCase ) ), f'Problem with {diffusers_minus_renamed}'
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
if key == "time_proj.weight":
__UpperCAmelCase : List[Any] = value.squeeze()
__UpperCAmelCase : Union[str, Any] = value
diffusers_model.load_state_dict(_UpperCAmelCase )
__UpperCAmelCase : List[Any] = 1_00
__UpperCAmelCase : List[Any] = 33
__UpperCAmelCase : Union[str, Any] = IPNDMScheduler(num_train_timesteps=_UpperCAmelCase )
__UpperCAmelCase : Tuple = torch.manual_seed(_UpperCAmelCase )
__UpperCAmelCase : Tuple = torch.randn([1, 2, config.sample_size] , generator=_UpperCAmelCase ).to(_UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = torch.linspace(1 , 0 , steps + 1 , device=_UpperCAmelCase )[:-1]
__UpperCAmelCase : str = get_crash_schedule(_UpperCAmelCase )
__UpperCAmelCase : Optional[int] = DanceDiffusionPipeline(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase )
__UpperCAmelCase : Any = torch.manual_seed(33 )
__UpperCAmelCase : Any = pipe(num_inference_steps=_UpperCAmelCase , generator=_UpperCAmelCase ).audios
__UpperCAmelCase : Optional[Any] = sampling.iplms_sample(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , {} )
__UpperCAmelCase : List[Any] = generated.clamp(-1 , 1 )
__UpperCAmelCase : int = (generated - audio).abs().sum()
__UpperCAmelCase : List[Any] = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('''Diff sum''' , _UpperCAmelCase )
print('''Diff max''' , _UpperCAmelCase )
assert diff_max < 1e-3, f'Diff max: {diff_max} is too much :-/'
print(f'Conversion for {model_name} successful!' )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
__A =parser.parse_args()
main(args)
| 241 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
def __init__( self : Any , _A : int , _A : int=12 , _A : int=7 , _A : Tuple=True , _A : Optional[int]=True , _A : Union[str, Any]=True , _A : str=99 , _A : str=32 , _A : int=32 , _A : Optional[Any]=2 , _A : Dict=4 , _A : int=37 , _A : List[Any]=0.1 , _A : str=0.1 , _A : Any=512 , _A : int=0.02 , _A : Optional[Any]=0 , _A : Dict=None , ):
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = projection_dim
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = initializer_range
_UpperCamelCase = scope
_UpperCamelCase = bos_token_id
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_UpperCamelCase = input_mask.numpy()
_UpperCamelCase , _UpperCamelCase = input_mask.shape
_UpperCamelCase = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_A ):
_UpperCamelCase = 1
_UpperCamelCase = 0
_UpperCamelCase = self.get_config()
return config, input_ids, tf.convert_to_tensor(_A )
def UpperCamelCase_ ( self : str ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : str , _A : Optional[Any] ):
_UpperCamelCase = TFBlipTextModel(config=_A )
_UpperCamelCase = model(_A , attention_mask=_A , training=_A )
_UpperCamelCase = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple ):
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = (TFBlipTextModel,) if is_tf_available() else ()
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = BlipTextModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=_A , hidden_size=37 )
def UpperCamelCase_ ( self : Dict ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCamelCase_ ( self : List[Any] ):
pass
def UpperCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def UpperCamelCase_ ( self : List[str] ):
pass
@slow
def UpperCamelCase_ ( self : Optional[int] ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFBlipTextModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def UpperCamelCase_ ( self : int , _A : Optional[int]=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=_A )
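# Fixture sketch (self-contained, names are ours; np is imported at the top of
# this test file): the masking loop in prepare_config_and_inputs keeps tokens
# before a random start index attended (1) and masks the tail (0), so every
# row retains at least one visible token.
_sketch_mask = np.ones((2, 7), dtype=np.int64)
for _row, _start in enumerate(np.random.randint(1, 7 - 1, size=(2,))):
    _sketch_mask[_row, :_start] = 1
    _sketch_mask[_row, _start:] = 0
assert (_sketch_mask.sum(axis=1) >= 1).all()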
| 10 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class _snake_case :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="resnet50" , UpperCAmelCase__=3 , UpperCAmelCase__=32 , UpperCAmelCase__=3 , UpperCAmelCase__=True , UpperCAmelCase__=True , ) -> Optional[Any]:
a_ = parent
a_ = out_indices if out_indices is not None else [4]
a_ = stage_names
a_ = out_features
a_ = backbone
a_ = batch_size
a_ = image_size
a_ = num_channels
a_ = use_pretrained_backbone
a_ = is_training
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ = self.get_config()
return config, pixel_values
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
a_ = TimmBackbone(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
with torch.no_grad():
a_ = model(UpperCAmelCase__ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
a_ = self.prepare_config_and_inputs()
a_ , a_ = config_and_inputs
a_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class _snake_case ( snake_case , snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = (TimmBackbone,) if is_torch_available() else ()
_UpperCamelCase = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a_ = TimmBackboneModelTester(self )
a_ = ConfigTester(self , config_class=UpperCAmelCase__ , has_text_modality=UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ = 'resnet18'
a_ = 'microsoft/resnet-18'
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , use_timm_backbone=UpperCAmelCase__ , out_indices=[1, 2, 3] )
a_ = AutoBackbone.from_pretrained(UpperCAmelCase__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
@unittest.skip('Safetensors is not supported by timm.' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
a_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = True
a_ = self.has_attentions
# no need to test all models as different heads yield the same functionality
a_ = self.all_model_classes[0]
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
a_ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
a_ = model(**UpperCAmelCase__ )
a_ = outputs[0][-1]
# Encoder-/Decoder-only models
a_ = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
a_ = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=UpperCAmelCase__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = None
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
a_ = copy.deepcopy(UpperCAmelCase__ )
a_ = False
a_ = model_class(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
a_ = model(**UpperCAmelCase__ )
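# Convention sketch for the out_indices checks in the tests above: timm refers
# to the last stage with the negative index (-1,), while transformers stores
# the explicit positive index [len(stage_names) - 1]; both name the same stage.
# Illustration with a hypothetical 4-stage backbone:
_sketch_stages = ["stem", "stage1", "stage2", "stage3"]
assert _sketch_stages[-1] == _sketch_stages[len(_sketch_stages) - 1]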
| 697 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
UpperCamelCase =logging.get_logger(__name__)
UpperCamelCase ={
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class A ( __lowerCamelCase ):
"""simple docstring"""
__a : Optional[Any] = '''dpt'''
def __init__( self , __lowerCAmelCase=7_68 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=30_72 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=3_84 , __lowerCAmelCase=16 , __lowerCAmelCase=3 , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=[2, 5, 8, 11] , __lowerCAmelCase="project" , __lowerCAmelCase=[4, 2, 1, 0.5] , __lowerCAmelCase=[96, 1_92, 3_84, 7_68] , __lowerCAmelCase=2_56 , __lowerCAmelCase=-1 , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=0.4 , __lowerCAmelCase=2_55 , __lowerCAmelCase=0.1 , __lowerCAmelCase=[1, 10_24, 24, 24] , __lowerCAmelCase=[0, 1] , __lowerCAmelCase=None , **__lowerCAmelCase , ):
super().__init__(**a_ )
UpperCamelCase_ : List[str] = hidden_size
UpperCamelCase_ : Optional[Any] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("""Initializing the config with a `BiT` backbone.""" )
UpperCamelCase_ : Any = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
}
UpperCamelCase_ : Any = BitConfig(**a_ )
elif isinstance(a_ , a_ ):
logger.info("""Initializing the config with a `BiT` backbone.""" )
UpperCamelCase_ : Dict = BitConfig(**a_ )
elif isinstance(a_ , a_ ):
UpperCamelCase_ : Dict = backbone_config
else:
raise ValueError(
F"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}." )
UpperCamelCase_ : int = backbone_featmap_shape
UpperCamelCase_ : Union[str, Any] = neck_ignore_stages
if readout_type != "project":
raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
else:
UpperCamelCase_ : str = None
UpperCamelCase_ : Optional[Any] = None
UpperCamelCase_ : List[str] = []
UpperCamelCase_ : Optional[Any] = num_hidden_layers
UpperCamelCase_ : List[str] = num_attention_heads
UpperCamelCase_ : str = intermediate_size
UpperCamelCase_ : Dict = hidden_act
UpperCamelCase_ : int = hidden_dropout_prob
UpperCamelCase_ : str = attention_probs_dropout_prob
UpperCamelCase_ : Optional[Any] = initializer_range
UpperCamelCase_ : Union[str, Any] = layer_norm_eps
UpperCamelCase_ : Optional[Any] = image_size
UpperCamelCase_ : int = patch_size
UpperCamelCase_ : Union[str, Any] = num_channels
UpperCamelCase_ : Tuple = qkv_bias
UpperCamelCase_ : Optional[int] = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
UpperCamelCase_ : Dict = readout_type
UpperCamelCase_ : int = reassemble_factors
UpperCamelCase_ : Any = neck_hidden_sizes
UpperCamelCase_ : int = fusion_hidden_size
UpperCamelCase_ : Union[str, Any] = head_in_index
UpperCamelCase_ : Optional[int] = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
UpperCamelCase_ : List[str] = use_auxiliary_head
UpperCamelCase_ : str = auxiliary_loss_weight
UpperCamelCase_ : Union[str, Any] = semantic_loss_ignore_index
UpperCamelCase_ : int = semantic_classifier_dropout
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Tuple = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
UpperCamelCase_ : int = self.backbone_config.to_dict()
UpperCamelCase_ : Union[str, Any] = self.__class__.model_type
return output
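# Serialization sketch: to_dict above deep-copies __dict__, replaces the nested
# backbone config object with its own dict, and records model_type. A
# self-contained mini version of the same pattern (the class name is ours;
# copy is imported at the top of this file):
class _SketchConfig:
    model_type = "sketch"

    def __init__(self, backbone_config=None):
        self.backbone_config = backbone_config

    def to_dict(self):
        out = copy.deepcopy(self.__dict__)
        if out["backbone_config"] is not None:
            out["backbone_config"] = out["backbone_config"].to_dict()
        out["model_type"] = self.model_type
        return out

assert _SketchConfig(_SketchConfig()).to_dict()["backbone_config"]["model_type"] == "sketch"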
| 707 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
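# Fixture sketch (illustrative helper, mirroring the dummy-logits method
# defined in the test class below, named _get_dummy_logits upstream): seeding
# NumPy before sampling makes the logits deterministic, so the processor path
# and the raw pyctcdecode path can be compared on equal inputs (np is imported
# at the top of this file).
def _sketch_dummy_logits(shape=(2, 10, 16), seed=77):
    np.random.seed(seed)
    return np.random.rand(*shape)

assert (_sketch_dummy_logits() == _sketch_dummy_logits()).all()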
@require_pyctcdecode
class A ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
UpperCamelCase_ : str = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
UpperCamelCase_ : int = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
UpperCamelCase_ : Tuple = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
UpperCamelCase_ : Optional[Any] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_60_00,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
UpperCamelCase_ : int = tempfile.mkdtemp()
UpperCamelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase_ : List[Any] = os.path.join(self.tmpdirname , __lowerCAmelCase )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + """\n""" )
# load decoder from hub
UpperCamelCase_ : Union[str, Any] = """hf-internal-testing/ngram-beam-search-decoder"""
def _UpperCAmelCase ( self , **__lowerCAmelCase ):
UpperCamelCase_ : str = self.add_kwargs_tokens_map.copy()
kwargs.update(__lowerCAmelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _UpperCAmelCase ( self , **__lowerCAmelCase ):
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _UpperCAmelCase ( self , **__lowerCAmelCase ):
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__lowerCAmelCase )
def _UpperCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCamelCase_ : str = self.get_feature_extractor()
UpperCamelCase_ : Tuple = self.get_decoder()
UpperCamelCase_ : int = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , decoder=__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_ : Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __lowerCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __lowerCAmelCase )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Union[str, Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure the additional decoder kwargs passed to from_pretrained are propagated to the language model
UpperCamelCase_ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : int = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__lowerCAmelCase , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__lowerCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def _UpperCAmelCase ( self ):
UpperCamelCase_ : Tuple = self.get_feature_extractor()
UpperCamelCase_ : Tuple = self.get_tokenizer()
UpperCamelCase_ : Any = self.get_decoder()
UpperCamelCase_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , decoder=__lowerCAmelCase )
UpperCamelCase_ : List[Any] = floats_list((3, 10_00) )
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)

        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)

    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm_attrs(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)

    def test_decoder_download_ignores_files(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = WavaVecaProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that the decoder files from the hub and the local files in the cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wavaveca = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wavaveca = processor_wavaveca(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wavaveca = processor_wavaveca.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wavaveca.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]

        outputs = processor.decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()

        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])

    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_offsets = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_offsets, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_offsets, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_offsets, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_offsets, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
| 543 | 0 |
"""simple docstring"""
from typing import Any
import numpy as np
def __UpperCamelCase ( SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
return np.array_equal(SCREAMING_SNAKE_CASE , matrix.conjugate().T )
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
__snake_case = v.conjugate().T
__snake_case = v_star.dot(SCREAMING_SNAKE_CASE )
assert isinstance(SCREAMING_SNAKE_CASE , np.ndarray )
return (v_star_dot.dot(SCREAMING_SNAKE_CASE )) / (v_star.dot(SCREAMING_SNAKE_CASE ))
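

# Added property check (not part of the original tests): for a Hermitian matrix
# the Rayleigh quotient always lies between the smallest and largest eigenvalue.
# For example, [[2, 1], [1, 2]] has eigenvalues 1 and 3, and
# rayleigh_quotient(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([[1.0], [2.0]]))
# evaluates to 14/5 = 2.8, which indeed lies inside [1, 3].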
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
| 163 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_maskformer"] = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
    _import_structure["modeling_maskformer_swin"] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 163 | 1 |
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    """
    Returns the sum of Euler's totient phi(n) for 2 <= n <= limit, computed with a
    sieve: phi[i] starts at i - 1 (correct for primes) and every prime i then
    corrects its multiples via phi[j] -= phi[j] // i.
    """
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: nothing smaller has touched phi[i]
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
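

# Added sanity check: phi(2..8) = 1, 2, 2, 4, 2, 6, 4, which sums to 21.
assert solution(8) == 21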
if __name__ == "__main__":
    print(solution())
 | 713 |
'''simple docstring'''
def counting_sort(collection):
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how many times each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i there are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered
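

# Worked example (added for illustration): counting_sort([4, 2, 2, 8]) builds
# counting_arr = [2, 0, 1, 0, 0, 0, 1] over the value range 2..8, prefix-sums it
# to [2, 2, 3, 3, 3, 3, 4], and then, scanning the input from the end, places
# 8 -> index 3, 2 -> index 1, 2 -> index 0 and 4 -> index 2, yielding [2, 2, 4, 8].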
def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(counting_sort(unsorted))
 | 113 | 0 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/blenderbot_small-90M": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" BlenderbotSmall tokenizer, backed by the HuggingFace tokenizers library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
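

# Minimal usage sketch (added; assumes the facebook/blenderbot_small-90M files
# listed above are reachable):
#
#   tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
#   input_ids = tokenizer("sample text")["input_ids"]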
| 468 |
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
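

# Added cross-check against the standard library (math.comb, Python 3.8+):
# C(52, 5) = 2,598,960 five-card hands.
from math import comb

assert combinations(52, 5) == comb(52, 5) == 2598960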
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
F'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'''If a class of 40 students must be arranged into groups of''',
F'''4 for group projects, there are {combinations(40, 4)} ways''',
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
F'''are {combinations(10, 3)} ways that first, second and''',
'''third place can be awarded.''',
)
| 122 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Returns a list of all the prime numbers up to num (sieve of Eratosthenes)."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start to be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False

        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
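

# Added sanity check: the primes up to 30 are well known.
assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]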
if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
 | 370 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_audio_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_audio_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_audio_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_audio_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
 | 370 | 1 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Semantic Stable Diffusion pipelines. Field names follow the
    standard diffusers output API; the original annotations were lost in extraction.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 279 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            # the Perceiver model expects the name "inputs" instead of "input_ids"
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
 | 256 | 0 |
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(text):
        return tok(text, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # can't fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
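

# Illustrative trace (added; assumes a hypothetical tokenizer where the token
# count equals the word count and max_tokens=3): packing sources
# ["a b", "c", "d e f"] with targets ["x", "y", "z"] first merges "a b" + "c"
# (3 tokens, still fits), then fails to add "d e f", emitting ("a b c", "x y")
# and finally ("d e f", "z") in the cleanup step.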
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
| 417 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Finds the root of `function` from `starting_point` via the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
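

# Added sanity check: the positive root of x**2 - 2 is sqrt(2).
assert abs(newton_raphson("x**2 - 2", 1.0) - 2**0.5) < 1e-9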
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
# Find fourth Root of 5
print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f'{newton_raphson("log(y) - 1", 2, variable="y")}',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f'{newton_raphson("exp(x) - 1", 10, precision=0.005)}',
)
# Find root of cos(x)
print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 417 | 1 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")

    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1

    return int(ans)
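

# Added sanity check: 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29.
assert solution(13195) == 29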
if __name__ == "__main__":
print(f'{solution() = }')
| 301 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
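

# Minimal usage sketch (added; "harmonai/maestro-150k" is a publicly listed
# dance-diffusion checkpoint, used here purely as an illustration):
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   audio = pipe(audio_length_in_s=4.0, num_inference_steps=100).audios[0]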
| 301 | 1 |
'''simple docstring'''
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
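

# Added sanity check: C(4, 2) enumerates six pairs in lexicographic order.
assert generate_all_combinations(4, 2) == [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]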
if __name__ == "__main__":
A__ : Tuple = 4
A__ : Union[str, Any] = 2
A__ : int = generate_all_combinations(n, k)
print_all_state(total_list)
| 124 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 124 | 1 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """Returns the sum of all the multiples of 3 or 5 below n."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:
            # note: this branch is unreachable, since any multiple of 15 already
            # matches the condition above; it is kept only to preserve the source.
            result -= a
        a += 1
    return result
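

# Added cross-check (not part of the original solution) via inclusion-exclusion
# on arithmetic series: sum(3k) + sum(5k) - sum(15k) for multiples below n.
def _closed_form(n: int = 1000) -> int:
    def tri(m: int) -> int:
        # triangular number: 1 + 2 + ... + m
        return m * (m + 1) // 2

    return 3 * tri((n - 1) // 3) + 5 * tri((n - 1) // 5) - 15 * tri((n - 1) // 15)


assert solution(1000) == _closed_form(1000) == 233168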
if __name__ == "__main__":
print(F"""{solution() = }""")
| 83 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict


@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )

    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 83 | 1 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copies/tweaks the old checkpoint's weights into the current prophetnet structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])

                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])

                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
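
    # Example invocation (added; both paths are placeholders):
    #   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
    #       --prophetnet_checkpoint_path ./prophetnet_old \
    #       --pytorch_dump_folder_path ./prophetnet_converted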
| 314 |
def __get_demo_graph(index):
"""simple docstring"""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __snake_case ( _UpperCAmelCase ):
"""simple docstring"""
lowercase = 0
lowercase = len(_UpperCAmelCase ) # No of vertices in graph
lowercase = [0] * n
lowercase = [False] * n
def dfs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase = True
lowercase = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , id_ )
lowercase = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
lowercase = min(low[at] , low[to] )
lowercase = []
for i in range(_UpperCAmelCase ):
if not visited[i]:
dfs(_UpperCAmelCase , -1 , _UpperCAmelCase , id_ )
return bridges
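# Sanity check by hand: for the first demo graph above, the bridges are exactly
# (2, 3), (3, 4) and (2, 5); the triangle 0-1-2 and the cycle 5-6-7-8-5
# contribute none, since each of their edges lies on a cycle.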
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314 | 1 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
A_ : List[Any] = logging.getLogger(__name__)
class lowerCamelCase (A__ ):
lowerCamelCase__ : List[str] = "summarization"
lowerCamelCase__ : Union[str, Any] = ["loss"]
lowerCamelCase__ : Tuple = ROUGE_KEYS
lowerCamelCase__ : Any = "rouge2"
def __init__( self : Dict , __UpperCAmelCase : str , **__UpperCAmelCase : Tuple ) -> int:
if hparams.sortish_sampler and hparams.gpus > 1:
SCREAMING_SNAKE_CASE__ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(_lowerCamelCase , num_labels=_lowerCamelCase , mode=self.mode , **_lowerCamelCase )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
SCREAMING_SNAKE_CASE__ = Path(self.output_dir ) / """metrics.json"""
SCREAMING_SNAKE_CASE__ = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams , self.hparams_save_path )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = defaultdict(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.config.model_type
SCREAMING_SNAKE_CASE__ = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
SCREAMING_SNAKE_CASE__ = {
"""data_dir""": self.hparams.data_dir,
"""max_source_length""": self.hparams.max_source_length,
"""prefix""": self.model.config.prefix or """""",
}
SCREAMING_SNAKE_CASE__ = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
SCREAMING_SNAKE_CASE__ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
SCREAMING_SNAKE_CASE__ = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F"""target_lens: {self.target_lens}"""
assert self.target_lens["train"] <= self.target_lens["test"], F"""target_lens: {self.target_lens}"""
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
SCREAMING_SNAKE_CASE__ = get_git_info()["""repo_sha"""]
SCREAMING_SNAKE_CASE__ = hparams.num_workers
SCREAMING_SNAKE_CASE__ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
SCREAMING_SNAKE_CASE__ = self.decoder_start_token_id
SCREAMING_SNAKE_CASE__ = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
SCREAMING_SNAKE_CASE__ = self.hparams.eval_max_gen_length
else:
SCREAMING_SNAKE_CASE__ = self.model.config.max_length
SCREAMING_SNAKE_CASE__ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : Dict[str, torch.Tensor] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(_lowerCamelCase , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
SCREAMING_SNAKE_CASE__ = True
return readable_batch
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : List[Any] , **__UpperCAmelCase : str ) -> Any:
return self.model(_lowerCamelCase , **_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : List[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.tokenizer.batch_decode(
_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
return lmap(str.strip , _lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : dict ) -> int:
SCREAMING_SNAKE_CASE__ = self.tokenizer.pad_token_id
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch["""input_ids"""], batch["""attention_mask"""]
SCREAMING_SNAKE_CASE__ = batch["""labels"""]
if isinstance(self.model , _lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = self.model._shift_right(_lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ = shift_tokens_right(_lowerCamelCase , _lowerCamelCase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
SCREAMING_SNAKE_CASE__ = decoder_input_ids
self.save_readable_batch(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self(_lowerCamelCase , attention_mask=_lowerCamelCase , decoder_input_ids=_lowerCamelCase , use_cache=_lowerCamelCase )
SCREAMING_SNAKE_CASE__ = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
SCREAMING_SNAKE_CASE__ = nn.CrossEntropyLoss(ignore_index=_lowerCamelCase )
assert lm_logits.shape[-1] == self.vocab_size
SCREAMING_SNAKE_CASE__ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
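            # label smoothing: mix the per-token NLL with a uniform prior over
            # the vocabulary, roughly (1 - eps) * nll + eps * mean_v(-log p_v)
            # (assuming a fairseq-style label_smoothed_nll_loss helper)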
SCREAMING_SNAKE_CASE__ = nn.functional.log_softmax(_lowerCamelCase , dim=-1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = label_smoothed_nll_loss(
_lowerCamelCase , _lowerCamelCase , self.hparams.label_smoothing , ignore_index=_lowerCamelCase )
return (loss,)
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
return self.tokenizer.pad_token_id
def SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple ) -> str:
SCREAMING_SNAKE_CASE__ = self._step(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ = dict(zip(self.loss_names , _lowerCamelCase ) )
# tokens per batch
SCREAMING_SNAKE_CASE__ = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
SCREAMING_SNAKE_CASE__ = batch["""input_ids"""].shape[0]
SCREAMING_SNAKE_CASE__ = batch["""input_ids"""].eq(self.pad ).sum()
SCREAMING_SNAKE_CASE__ = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : int ) -> Optional[int]:
return self._generative_step(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any]="val" ) -> Optional[Any]:
self.step_count += 1
SCREAMING_SNAKE_CASE__ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
SCREAMING_SNAKE_CASE__ = losses["""loss"""]
SCREAMING_SNAKE_CASE__ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
SCREAMING_SNAKE_CASE__ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
SCREAMING_SNAKE_CASE__ = torch.tensor(_lowerCamelCase ).type_as(_lowerCamelCase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {F"""{prefix}_avg_{k}""": x for k, x in losses.items()}
SCREAMING_SNAKE_CASE__ = self.step_count
self.metrics[prefix].append(_lowerCamelCase ) # callback writes this to self.metrics_save_path
SCREAMING_SNAKE_CASE__ = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F"""{prefix}_loss""": loss,
F"""{prefix}_{self.val_metric}""": metric_tensor,
}
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any ) -> List[str]:
return calculate_rouge(_lowerCamelCase , _lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : dict ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
SCREAMING_SNAKE_CASE__ = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=_lowerCamelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
SCREAMING_SNAKE_CASE__ = (time.time() - ta) / batch["""input_ids"""].shape[0]
SCREAMING_SNAKE_CASE__ = self.ids_to_clean_text(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.ids_to_clean_text(batch["""labels"""] )
SCREAMING_SNAKE_CASE__ = self._step(_lowerCamelCase )
SCREAMING_SNAKE_CASE__ = dict(zip(self.loss_names , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = self.calc_generative_metrics(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE__ = np.mean(lmap(_lowerCamelCase , _lowerCamelCase ) )
base_metrics.update(gen_time=_lowerCamelCase , gen_len=_lowerCamelCase , preds=_lowerCamelCase , target=_lowerCamelCase , **_lowerCamelCase )
return base_metrics
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str ) -> Optional[Any]:
return self._generative_step(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : str ) -> Dict:
return self.validation_epoch_end(_lowerCamelCase , prefix="""test""" )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : Union[str, Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.n_obs[type_path]
SCREAMING_SNAKE_CASE__ = self.target_lens[type_path]
SCREAMING_SNAKE_CASE__ = self.dataset_class(
self.tokenizer , type_path=_lowerCamelCase , n_obs=_lowerCamelCase , max_target_length=_lowerCamelCase , **self.dataset_kwargs , )
return dataset
def SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : bool = False ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.get_dataset(_lowerCamelCase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
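            # "sortish" sampling: noisily sort examples by source length so each
            # batch contains similar lengths (less padding) while keeping some shuffling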
SCREAMING_SNAKE_CASE__ = dataset.make_sortish_sampler(_lowerCamelCase , distributed=self.hparams.gpus > 1 )
return DataLoader(
_lowerCamelCase , batch_size=_lowerCamelCase , collate_fn=dataset.collate_fn , shuffle=_lowerCamelCase , num_workers=self.num_workers , sampler=_lowerCamelCase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE__ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
_lowerCamelCase , batch_sampler=_lowerCamelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
_lowerCamelCase , batch_size=_lowerCamelCase , collate_fn=dataset.collate_fn , shuffle=_lowerCamelCase , num_workers=self.num_workers , sampler=_lowerCamelCase , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=_lowerCamelCase )
return dataloader
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase : Tuple , __UpperCAmelCase : int ) -> Optional[int]:
BaseTransformer.add_model_specific_args(_lowerCamelCase , _lowerCamelCase )
add_generic_args(_lowerCamelCase , _lowerCamelCase )
parser.add_argument(
"""--max_source_length""" , default=1_0_2_4 , type=_lowerCamelCase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=5_6 , type=_lowerCamelCase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=1_4_2 , type=_lowerCamelCase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=1_4_2 , type=_lowerCamelCase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=_lowerCamelCase )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=_lowerCamelCase )
parser.add_argument("""--max_tokens_per_batch""" , type=_lowerCamelCase , default=_lowerCamelCase )
parser.add_argument("""--logger_name""" , type=_lowerCamelCase , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=_lowerCamelCase , default=-1 , required=_lowerCamelCase , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=_lowerCamelCase , default=5_0_0 , required=_lowerCamelCase , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=_lowerCamelCase , default=-1 , required=_lowerCamelCase , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=_lowerCamelCase , default="""summarization""" , required=_lowerCamelCase , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=_lowerCamelCase , default=0.0 , required=_lowerCamelCase )
parser.add_argument("""--src_lang""" , type=_lowerCamelCase , default="""""" , required=_lowerCamelCase )
parser.add_argument("""--tgt_lang""" , type=_lowerCamelCase , default="""""" , required=_lowerCamelCase )
parser.add_argument("""--eval_beams""" , type=_lowerCamelCase , default=_lowerCamelCase , required=_lowerCamelCase )
parser.add_argument(
"""--val_metric""" , type=_lowerCamelCase , default=_lowerCamelCase , required=_lowerCamelCase , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=_lowerCamelCase , default=_lowerCamelCase , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=_lowerCamelCase , default=1 , required=_lowerCamelCase , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=_lowerCamelCase , default=-1 , required=_lowerCamelCase , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class lowerCamelCase (A__ ):
lowerCamelCase__ : int = "translation"
lowerCamelCase__ : List[str] = ["loss"]
lowerCamelCase__ : Union[str, Any] = ["bleu"]
lowerCamelCase__ : Dict = "bleu"
def __init__( self : Optional[int] , __UpperCAmelCase : Optional[Any] , **__UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
super().__init__(_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE__ = hparams.src_lang
SCREAMING_SNAKE_CASE__ = hparams.tgt_lang
def SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : Any , __UpperCAmelCase : Tuple ) -> Union[str, Any]:
return calculate_bleu(_lowerCamelCase , _lowerCamelCase )
def A ( snake_case__ , snake_case__=None ):
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=__a )
check_output_dir(__a , expected_items=3 )
if model is None:
if "summarization" in args.task:
SCREAMING_SNAKE_CASE__ = SummarizationModule(__a )
else:
SCREAMING_SNAKE_CASE__ = TranslationModule(__a )
SCREAMING_SNAKE_CASE__ = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("""/tmp""" )
or str(args.output_dir ).startswith("""/var""" )
):
SCREAMING_SNAKE_CASE__ = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE__ = os.environ.get("""WANDB_PROJECT""" , __a )
SCREAMING_SNAKE_CASE__ = WandbLogger(name=model.output_dir.name , project=__a )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE__ = WandbLogger(name=model.output_dir.name , project=f"""hf_{dataset}""" )
if args.early_stopping_patience >= 0:
SCREAMING_SNAKE_CASE__ = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = args.val_metric == """loss"""
SCREAMING_SNAKE_CASE__ = generic_train(
__a , __a , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , __a ) , early_stopping_callback=__a , logger=__a , )
pickle_save(model.hparams , model.output_dir / """hparams.pkl""" )
if not args.do_predict:
return model
SCREAMING_SNAKE_CASE__ = """"""
SCREAMING_SNAKE_CASE__ = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=__a ) )
if checkpoints:
SCREAMING_SNAKE_CASE__ = checkpoints[-1]
SCREAMING_SNAKE_CASE__ = checkpoints[-1]
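        # note: in the corresponding transformers example script, these two
        # identical-looking assignments set model.hparams.test_checkpoint and
        # trainer.resume_from_checkpoint, respectively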
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
A_ : Dict = argparse.ArgumentParser()
A_ : Any = pl.Trainer.add_argparse_args(parser)
A_ : Optional[Any] = SummarizationModule.add_model_specific_args(parser, os.getcwd())
A_ : int = parser.parse_args()
main(args)
| 196 |
"""simple docstring"""
from __future__ import annotations
def a_ ( __a , __a = None , __a = None ):
if start is None:
A__ = 0
if end is None:
A__ = len(__a ) - 1
if start >= end:
return
A__ = (start + end) // 2
slowsort(__a , __a , __a )
slowsort(__a , mid + 1 , __a )
if sequence[end] < sequence[mid]:
A__ , A__ = sequence[mid], sequence[end]
slowsort(__a , __a , end - 1 )
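# Usage sketch (hedged): the body refers to parameters named `sequence`,
# `start` and `end`, so with the original signature
# `def slowsort(sequence, start=None, end=None)` this sorts in place:
#     seq = [5, 2, 4, 1, 3]
#     slowsort(seq)
#     assert seq == [1, 2, 3, 4, 5]
# Slowsort is a deliberately inefficient "multiply and surrender" algorithm:
# sort each half recursively, swap the maximum to the end, then recurse on the
# remaining prefix, giving super-polynomial running time.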
if __name__ == "__main__":
from doctest import testmod
testmod()
| 571 | 0 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
SCREAMING_SNAKE_CASE__ = {
"""facebook/blenderbot_small-90M""": 512,
}
class lowercase ( lowercase_ ):
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase="<|endoftext|>" , lowercase="<|endoftext|>" , lowercase="<|endoftext|>" , lowercase=False , lowercase=True , **lowercase , ) -> str:
super().__init__(
ByteLevelBPETokenizer(
vocab=lowercase , merges=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase , ) , bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , **lowercase , )
lowerCAmelCase = add_prefix_space
def _snake_case ( self , lowercase , lowercase=None ) -> Optional[int]:
lowerCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
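        # layout built above: a single sequence becomes `<bos> A <eos>`,
        # a pair becomes `<bos> A <eos> <eos> B <eos>`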
def _snake_case ( self , lowercase , lowercase = None ) -> int:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 720 |
"""simple docstring"""
import math
import qiskit
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : int = 1 ):
'''simple docstring'''
if (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
or isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
or isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
):
raise TypeError("""inputs must be integers.""" )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError("""inputs must be positive.""" )
if (
(math.floor(SCREAMING_SNAKE_CASE ) != input_a)
or (math.floor(SCREAMING_SNAKE_CASE ) != input_a)
or (math.floor(SCREAMING_SNAKE_CASE ) != carry_in)
):
raise ValueError("""inputs must be exact integers.""" )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError("""inputs must be less or equal to 2.""" )
# build registers
lowerCAmelCase = qiskit.QuantumRegister(4 , """qr""" )
lowerCAmelCase = qiskit.ClassicalRegister(2 , """cr""" )
# list the entries
lowerCAmelCase = [input_a, input_a, carry_in]
lowerCAmelCase = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(SCREAMING_SNAKE_CASE ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(SCREAMING_SNAKE_CASE ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(SCREAMING_SNAKE_CASE ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
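    # The CCX/CX ladder above implements a classical full adder: qubit 1 briefly
    # holds a XOR b, qubit 2 ends as the sum bit (a XOR b XOR c_in), and qubit 3
    # as the carry-out (the majority of the three inputs); the final CX restores
    # qubit 1 to b.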
quantum_circuit.measure([2, 3] , SCREAMING_SNAKE_CASE ) # measure the last two qbits
lowerCAmelCase = qiskit.Aer.get_backend("""aer_simulator""" )
lowerCAmelCase = qiskit.execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=10_00 )
return job.result().get_counts(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(f'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 393 | 0 |
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
A = re.compile(R'\b(a|an|the)\b', re.UNICODE)
A = None
def UpperCAmelCase ( ):
lowerCamelCase : List[str] = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.')
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.')
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).')
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.')
parser.add_argument(
'--na-prob-thresh' , '-t' , type=UpperCAmelCase__ , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
parser.add_argument(
'--out-image-dir' , '-p' , metavar='out_images' , default=UpperCAmelCase__ , help='Save precision-recall curves to directory.')
parser.add_argument('--verbose' , '-v' , action='store_true')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def UpperCAmelCase ( UpperCAmelCase__ : str):
lowerCamelCase : Dict = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCamelCase : Dict = bool(qa['answers']['text'])
return qid_to_has_ans
def UpperCAmelCase ( UpperCAmelCase__ : Tuple):
def remove_articles(UpperCAmelCase__ : Any):
return ARTICLES_REGEX.sub(' ' , UpperCAmelCase__)
def white_space_fix(UpperCAmelCase__ : Any):
return " ".join(text.split())
def remove_punc(UpperCAmelCase__ : Tuple):
lowerCamelCase : Optional[Any] = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(UpperCAmelCase__ : List[str]):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(UpperCAmelCase__))))
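# e.g. normalize_answer("The  Eiffel Tower!") -> "eiffel tower":
# lowercased, punctuation stripped, articles removed, whitespace collapsed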
def UpperCAmelCase ( UpperCAmelCase__ : Union[str, Any]):
if not s:
return []
return normalize_answer(UpperCAmelCase__).split()
def UpperCAmelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict):
return int(normalize_answer(UpperCAmelCase__) == normalize_answer(UpperCAmelCase__))
def UpperCAmelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str]):
lowerCamelCase : List[str] = get_tokens(UpperCAmelCase__)
lowerCamelCase : Any = get_tokens(UpperCAmelCase__)
lowerCamelCase : List[Any] = collections.Counter(UpperCAmelCase__) & collections.Counter(UpperCAmelCase__)
lowerCamelCase : Optional[Any] = sum(common.values())
if len(UpperCAmelCase__) == 0 or len(UpperCAmelCase__) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
lowerCamelCase : List[str] = 1.0 * num_same / len(UpperCAmelCase__)
lowerCamelCase : List[Any] = 1.0 * num_same / len(UpperCAmelCase__)
lowerCamelCase : List[str] = (2 * precision * recall) / (precision + recall)
return fa
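# Worked F1 example: gold "in the late 1990s" vs. prediction "late 1990s" share
# the tokens {"late", "1990s"} after normalization ("the" is dropped), giving
# precision = 2/2, recall = 2/3 and F1 = 2 * 1 * (2/3) / (1 + 2/3) = 0.8.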
def UpperCAmelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any]):
lowerCamelCase : Tuple = {}
lowerCamelCase : int = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCamelCase : Any = qa['id']
lowerCamelCase : Dict = [t for t in qa['answers']['text'] if normalize_answer(UpperCAmelCase__)]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
lowerCamelCase : Any = ['']
if qid not in preds:
print(F'''Missing prediction for {qid}''')
continue
lowerCamelCase : int = preds[qid]
# Take max over all gold answers
lowerCamelCase : Optional[Any] = max(compute_exact(UpperCAmelCase__ , UpperCAmelCase__) for a in gold_answers)
lowerCamelCase : Optional[Any] = max(compute_fa(UpperCAmelCase__ , UpperCAmelCase__) for a in gold_answers)
return exact_scores, fa_scores
def UpperCAmelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int):
lowerCamelCase : Optional[Any] = {}
for qid, s in scores.items():
lowerCamelCase : Union[str, Any] = na_probs[qid] > na_prob_thresh
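        # treat the prediction as "no answer" when the model's no-answer
        # probability exceeds the threshold; it then scores 1 only if the
        # question is actually unanswerable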
if pred_na:
lowerCamelCase : Optional[int] = float(not qid_to_has_ans[qid])
else:
lowerCamelCase : Union[str, Any] = s
return new_scores
def UpperCAmelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict=None):
if not qid_list:
lowerCamelCase : List[Any] = len(UpperCAmelCase__)
return collections.OrderedDict(
[
('exact', 1_0_0.0 * sum(exact_scores.values()) / total),
('f1', 1_0_0.0 * sum(fa_scores.values()) / total),
('total', total),
])
else:
lowerCamelCase : Tuple = len(UpperCAmelCase__)
return collections.OrderedDict(
[
('exact', 1_0_0.0 * sum(exact_scores[k] for k in qid_list) / total),
('f1', 1_0_0.0 * sum(fa_scores[k] for k in qid_list) / total),
('total', total),
])
def UpperCAmelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str):
for k in new_eval:
lowerCamelCase : Optional[int] = new_eval[k]
def UpperCAmelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any):
plt.step(UpperCAmelCase__ , UpperCAmelCase__ , color='b' , alpha=0.2 , where='post')
plt.fill_between(UpperCAmelCase__ , UpperCAmelCase__ , step='post' , alpha=0.2 , color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.xlim([0.0, 1.0_5])
plt.ylim([0.0, 1.0_5])
plt.title(UpperCAmelCase__)
plt.savefig(UpperCAmelCase__)
plt.clf()
def UpperCAmelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Optional[int]=None):
lowerCamelCase : Optional[Any] = sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__: na_probs[k])
lowerCamelCase : Union[str, Any] = 0.0
lowerCamelCase : List[Any] = 1.0
lowerCamelCase : Optional[int] = 0.0
lowerCamelCase : Dict = [1.0]
lowerCamelCase : Optional[int] = [0.0]
lowerCamelCase : Optional[int] = 0.0
for i, qid in enumerate(UpperCAmelCase__):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
lowerCamelCase : List[Any] = true_pos / float(i + 1)
lowerCamelCase : List[Any] = true_pos / float(UpperCAmelCase__)
if i == len(UpperCAmelCase__) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(UpperCAmelCase__)
recalls.append(UpperCAmelCase__)
if out_image:
plot_pr_curve(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
return {"ap": 1_0_0.0 * avg_prec}
def UpperCAmelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict):
if out_image_dir and not os.path.exists(UpperCAmelCase__):
os.makedirs(UpperCAmelCase__)
lowerCamelCase : Union[str, Any] = sum(1 for v in qid_to_has_ans.values() if v)
if num_true_pos == 0:
return
lowerCamelCase : str = make_precision_recall_eval(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , out_image=os.path.join(UpperCAmelCase__ , 'pr_exact.png') , title='Precision-Recall curve for Exact Match score' , )
lowerCamelCase : Dict = make_precision_recall_eval(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , out_image=os.path.join(UpperCAmelCase__ , 'pr_f1.png') , title='Precision-Recall curve for F1 score' , )
lowerCamelCase : Optional[Any] = {k: float(UpperCAmelCase__) for k, v in qid_to_has_ans.items()}
lowerCamelCase : List[Any] = make_precision_recall_eval(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , out_image=os.path.join(UpperCAmelCase__ , 'pr_oracle.png') , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
merge_eval(UpperCAmelCase__ , UpperCAmelCase__ , 'pr_exact')
merge_eval(UpperCAmelCase__ , UpperCAmelCase__ , 'pr_f1')
merge_eval(UpperCAmelCase__ , UpperCAmelCase__ , 'pr_oracle')
def UpperCAmelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any]):
if not qid_list:
return
lowerCamelCase : Optional[int] = [na_probs[k] for k in qid_list]
lowerCamelCase : Union[str, Any] = np.ones_like(UpperCAmelCase__) / float(len(UpperCAmelCase__))
plt.hist(UpperCAmelCase__ , weights=UpperCAmelCase__ , bins=20 , range=(0.0, 1.0))
plt.xlabel('Model probability of no-answer')
plt.ylabel('Proportion of dataset')
plt.title(F'''Histogram of no-answer probability: {name}''')
plt.savefig(os.path.join(UpperCAmelCase__ , F'''na_prob_hist_{name}.png'''))
plt.clf()
def UpperCAmelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple):
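    # Threshold sweep: start from "answer nothing" (score = number of
    # unanswerable questions) and, walking questions in order of increasing
    # no-answer probability, flip them to "answered" one at a time, keeping
    # the threshold that maximizes the running score.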
lowerCamelCase : int = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
lowerCamelCase : Dict = num_no_ans
lowerCamelCase : Dict = cur_score
lowerCamelCase : Optional[Any] = 0.0
lowerCamelCase : Tuple = sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__: na_probs[k])
for i, qid in enumerate(UpperCAmelCase__):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
lowerCamelCase : int = scores[qid]
else:
if preds[qid]:
lowerCamelCase : Tuple = -1
else:
lowerCamelCase : Dict = 0
cur_score += diff
if cur_score > best_score:
lowerCamelCase : Tuple = cur_score
lowerCamelCase : str = na_probs[qid]
return 1_0_0.0 * best_score / len(UpperCAmelCase__), best_thresh
def UpperCAmelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any]):
lowerCamelCase , lowerCamelCase : Optional[Any] = find_best_thresh(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
lowerCamelCase , lowerCamelCase : Optional[int] = find_best_thresh(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
lowerCamelCase : Dict = best_exact
lowerCamelCase : Union[str, Any] = exact_thresh
lowerCamelCase : Tuple = best_fa
lowerCamelCase : Tuple = fa_thresh
def UpperCAmelCase ( ):
with open(OPTS.data_file) as f:
lowerCamelCase : Optional[Any] = json.load(UpperCAmelCase__)
lowerCamelCase : List[str] = dataset_json['data']
with open(OPTS.pred_file) as f:
lowerCamelCase : List[str] = json.load(UpperCAmelCase__)
if OPTS.na_prob_file:
with open(OPTS.na_prob_file) as f:
lowerCamelCase : str = json.load(UpperCAmelCase__)
else:
lowerCamelCase : Tuple = {k: 0.0 for k in preds}
lowerCamelCase : Tuple = make_qid_to_has_ans(UpperCAmelCase__) # maps qid to True/False
lowerCamelCase : int = [k for k, v in qid_to_has_ans.items() if v]
lowerCamelCase : Optional[int] = [k for k, v in qid_to_has_ans.items() if not v]
lowerCamelCase , lowerCamelCase : Tuple = get_raw_scores(UpperCAmelCase__ , UpperCAmelCase__)
lowerCamelCase : Tuple = apply_no_ans_threshold(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , OPTS.na_prob_thresh)
lowerCamelCase : Optional[Any] = apply_no_ans_threshold(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , OPTS.na_prob_thresh)
lowerCamelCase : str = make_eval_dict(UpperCAmelCase__ , UpperCAmelCase__)
if has_ans_qids:
lowerCamelCase : Optional[Any] = make_eval_dict(UpperCAmelCase__ , UpperCAmelCase__ , qid_list=UpperCAmelCase__)
merge_eval(UpperCAmelCase__ , UpperCAmelCase__ , 'HasAns')
if no_ans_qids:
lowerCamelCase : Dict = make_eval_dict(UpperCAmelCase__ , UpperCAmelCase__ , qid_list=UpperCAmelCase__)
merge_eval(UpperCAmelCase__ , UpperCAmelCase__ , 'NoAns')
if OPTS.na_prob_file:
find_all_best_thresh(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , OPTS.out_image_dir)
histogram_na_prob(UpperCAmelCase__ , UpperCAmelCase__ , OPTS.out_image_dir , 'hasAns')
histogram_na_prob(UpperCAmelCase__ , UpperCAmelCase__ , OPTS.out_image_dir , 'noAns')
if OPTS.out_file:
with open(OPTS.out_file , 'w') as f:
json.dump(UpperCAmelCase__ , UpperCAmelCase__)
else:
print(json.dumps(UpperCAmelCase__ , indent=2))
if __name__ == "__main__":
A = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
| 320 |
'''simple docstring'''
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
A = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class __snake_case ( a__):
def __init__( self, A = 101 ):
"""simple docstring"""
lowerCamelCase : int = length
def __len__( self ):
"""simple docstring"""
return self.length
def __getitem__( self, A ):
"""simple docstring"""
return i
class __snake_case :
def __call__( self, A ):
"""simple docstring"""
return {"input_ids": torch.tensor(A ), "labels": torch.tensor(A )}
class __snake_case ( nn.Module):
def __init__( self ):
"""simple docstring"""
super().__init__()
# Add some (unused) params otherwise DDP will complain.
lowerCamelCase : str = nn.Linear(120, 80 )
def UpperCAmelCase_ ( self, A, A=None ):
"""simple docstring"""
if labels is not None:
return torch.tensor(0.0, device=input_ids.device ), input_ids
else:
return input_ids
class __snake_case ( a__):
@require_torch_neuroncore
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = F'''--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
'''.split()
lowerCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
lowerCamelCase : int = F'''--output_dir {output_dir}'''.split()
lowerCamelCase : Optional[Any] = ['torchrun'] + distributed_args + args
execute_subprocess_async(A, env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class __snake_case ( a__):
@require_torch_multi_gpu
def UpperCAmelCase_ ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = F'''--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
'''.split()
lowerCamelCase : int = self.get_auto_remove_tmp_dir()
lowerCamelCase : Optional[Any] = F'''--output_dir {output_dir}'''.split()
lowerCamelCase : str = ['torchrun'] + distributed_args + args
execute_subprocess_async(A, env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
A = HfArgumentParser((TrainingArguments,))
A = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
f"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
A = DummyDataset(dataset_length)
def UpperCAmelCase ( UpperCAmelCase__ : EvalPrediction):
lowerCamelCase : Union[str, Any] = list(range(len(UpperCAmelCase__)))
lowerCamelCase : Any = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'Predictions and/or labels do not match expected results:\n - predictions: '
F'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''')
return {"success": success}
A = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
A = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
A = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
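    # Second evaluation/prediction pass: the bare assignments just below and at
    # the end (`= 2`, then `= None`) correspond to toggling
    # trainer.args.eval_accumulation_steps in the upstream transformers test,
    # exercising chunked accumulation of eval outputs.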
A = 2
A = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
A = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
A = None
| 320 | 1 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
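# Each try/except block below gates an import group on an optional backend
# (torch, flax, torch+scipy, torch+torchsde); when a backend is missing, the
# matching dummy-objects module is imported instead, so the classes raise a
# clear "missing dependency" error only when actually used.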
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 165 |
"""simple docstring"""
def _lowerCAmelCase(a : int ) -> int:
if n == 1 or not isinstance(a , a ):
return 0
elif n == 2:
return 1
else:
_SCREAMING_SNAKE_CASE =[0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def _lowerCAmelCase(a : int ) -> int:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =2
while digits < n:
index += 1
_SCREAMING_SNAKE_CASE =len(str(fibonacci(a ) ) )
return index
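# e.g. the first Fibonacci number with 3 digits is 144, at index 12, so the
# search above returns 12 for n = 3; solution() looks for the first term with
# 1000 digits.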
def _lowerCAmelCase(a : int = 1000 ) -> int:
return fibonacci_digits_index(a )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 165 | 1 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
__UpperCamelCase : Optional[int] = {
'''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def __SCREAMING_SNAKE_CASE ( A_ = "dhaka" , A_ = 5 ):
lowerCAmelCase__ : Optional[Any] = min(A_ , 50 ) # Prevent abuse!
lowerCAmelCase__ : List[str] = {
'''q''': query,
'''tbm''': '''isch''',
'''hl''': '''en''',
'''ijn''': '''0''',
}
lowerCAmelCase__ : Any = requests.get('''https://www.google.com/search''' , params=A_ , headers=A_ )
lowerCAmelCase__ : str = BeautifulSoup(html.text , '''html.parser''' )
lowerCAmelCase__ : Any = ''''''.join(
re.findall(r'''AF_initDataCallback\(([^<]+)\);''' , str(soup.select('''script''' ) ) ) )
lowerCAmelCase__ : Tuple = json.dumps(A_ )
lowerCAmelCase__ : Any = json.loads(A_ )
lowerCAmelCase__ : Dict = re.findall(
r'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' , A_ , )
if not matched_google_image_data:
return 0
lowerCAmelCase__ : Any = re.sub(
r'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' , '''''' , str(A_ ) , )
lowerCAmelCase__ : List[Any] = re.findall(
r'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' , A_ , )
for index, fixed_full_res_image in enumerate(A_ ):
if index >= max_images:
return index
lowerCAmelCase__ : Tuple = bytes(A_ , '''ascii''' ).decode(
'''unicode-escape''' )
lowerCAmelCase__ : Any = bytes(A_ , '''ascii''' ).decode(
'''unicode-escape''' )
lowerCAmelCase__ : Optional[Any] = urllib.request.build_opener()
lowerCAmelCase__ : Tuple = [
(
'''User-Agent''',
'''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
)
]
urllib.request.install_opener(A_ )
lowerCAmelCase__ : Dict = f'query_{query.replace(" " , "_" )}'
if not os.path.exists(A_ ):
os.makedirs(A_ )
urllib.request.urlretrieve( # noqa: S310
A_ , f'{path_name}/original_size_img_{index}.jpg' )
return index
if __name__ == "__main__":
try:
__UpperCamelCase : int = download_images_from_google_query(sys.argv[1])
print(F'''{image_count} images were downloaded to disk.''')
except IndexError:
print('''Please provide a search term.''')
raise
| 450 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__UpperCamelCase : int = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'''tensor(bool)''': np.bool_,
'''tensor(int8)''': np.inta,
'''tensor(uint8)''': np.uinta,
'''tensor(int16)''': np.intaa,
'''tensor(uint16)''': np.uintaa,
'''tensor(int32)''': np.intaa,
'''tensor(uint32)''': np.uintaa,
'''tensor(int64)''': np.intaa,
'''tensor(uint64)''': np.uintaa,
'''tensor(float16)''': np.floataa,
'''tensor(float)''': np.floataa,
'''tensor(double)''': np.floataa,
}
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowercase_ : Union[str, Any]=None ,**lowercase_ : List[str] ):
logger.info('''`diffusers.OnnxRuntimeModel` is experimental and might change in the future.''' )
lowerCAmelCase__ : List[str] = model
lowerCAmelCase__ : Optional[Any] = kwargs.get('''model_save_dir''' ,lowercase_ )
lowerCAmelCase__ : str = kwargs.get('''latest_model_name''' ,lowercase_ )
def __call__( self : Any ,**lowercase_ : Dict ):
lowerCAmelCase__ : List[Any] = {k: np.array(lowercase_ ) for k, v in kwargs.items()}
return self.model.run(lowercase_ ,lowercase_ )
@staticmethod
def __lowerCAmelCase ( lowercase_ : Union[str, Path] ,lowercase_ : str=None ,lowercase_ : str=None ):
if provider is None:
logger.info('''No onnxruntime provider specified, using CPUExecutionProvider''' )
lowerCAmelCase__ : Optional[Any] = '''CPUExecutionProvider'''
return ort.InferenceSession(lowercase_ ,providers=[provider] ,sess_options=lowercase_ )
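        # builds an onnxruntime InferenceSession directly from the file path;
        # the provider string (e.g. "CUDAExecutionProvider") selects the backend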
def __lowerCAmelCase ( self : Dict ,lowercase_ : Union[str, Path] ,lowercase_ : Optional[str] = None ,**lowercase_ : str ):
lowerCAmelCase__ : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME
lowerCAmelCase__ : Tuple = self.model_save_dir.joinpath(self.latest_model_name )
lowerCAmelCase__ : List[Any] = Path(lowercase_ ).joinpath(lowercase_ )
try:
shutil.copyfile(lowercase_ ,lowercase_ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
lowerCAmelCase__ : Dict = self.model_save_dir.joinpath(lowercase_ )
if src_path.exists():
lowerCAmelCase__ : Union[str, Any] = Path(lowercase_ ).joinpath(lowercase_ )
try:
shutil.copyfile(lowercase_ ,lowercase_ )
except shutil.SameFileError:
pass
def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : Union[str, os.PathLike] ,**lowercase_ : Union[str, Any] ,):
if os.path.isfile(lowercase_ ):
logger.error(F'Provided path ({save_directory}) should be a directory, not a file' )
return
os.makedirs(lowercase_ ,exist_ok=lowercase_ )
# saving model weights/files
self._save_pretrained(lowercase_ ,**lowercase_ )
@classmethod
def __lowerCAmelCase ( cls : Dict ,lowercase_ : Union[str, Path] ,lowercase_ : Optional[Union[bool, str, None]] = None ,lowercase_ : Optional[Union[str, None]] = None ,lowercase_ : bool = False ,lowercase_ : Optional[str] = None ,lowercase_ : Optional[str] = None ,lowercase_ : Optional[str] = None ,lowercase_ : Optional["ort.SessionOptions"] = None ,**lowercase_ : Optional[int] ,):
lowerCAmelCase__ : Optional[Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(lowercase_ ):
lowerCAmelCase__ : str = OnnxRuntimeModel.load_model(
os.path.join(lowercase_ ,lowercase_ ) ,provider=lowercase_ ,sess_options=lowercase_ )
lowerCAmelCase__ : Any = Path(lowercase_ )
# load model from hub
else:
# download model
lowerCAmelCase__ : Optional[Any] = hf_hub_download(
repo_id=lowercase_ ,filename=lowercase_ ,use_auth_token=lowercase_ ,revision=lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,)
lowerCAmelCase__ : str = Path(lowercase_ ).parent
lowerCAmelCase__ : Union[str, Any] = Path(lowercase_ ).name
lowerCAmelCase__ : Optional[Any] = OnnxRuntimeModel.load_model(lowercase_ ,provider=lowercase_ ,sess_options=lowercase_ )
return cls(model=lowercase_ ,**lowercase_ )
@classmethod
def __lowerCAmelCase ( cls : Any ,lowercase_ : Union[str, Path] ,lowercase_ : bool = True ,lowercase_ : Optional[str] = None ,lowercase_ : Optional[str] = None ,**lowercase_ : Any ,):
lowerCAmelCase__ : Union[str, Any] = None
if len(str(lowercase_ ).split('''@''' ) ) == 2:
lowerCAmelCase__ ,lowerCAmelCase__ : str = model_id.split('''@''' )
return cls._from_pretrained(
model_id=lowercase_ ,revision=lowercase_ ,cache_dir=lowercase_ ,force_download=lowercase_ ,use_auth_token=lowercase_ ,**lowercase_ ,)
| 450 | 1 |
'''simple docstring'''
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
def count_of_possible_combinations(lowerCamelCase__ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
lowerCamelCase__ , lowerCamelCase__ ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
A_ : int = sum(
count_of_possible_combinations_with_dp_array(target - item , lowerCamelCase__ )
for item in array )
A_ : List[str] = answer
return answer
A_ : Optional[int] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[Any] = [0] * (target + 1)
A_ : int = 1
for i in range(1 , target + 1 ):
for j in range(lowerCamelCase__ ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
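# Worked example for the bottom-up version with array = [1, 2, 5] and target = 5:
# dp = [1, 1, 2, 3, 5, 9], e.g. dp[5] = dp[4] + dp[3] + dp[0] = 5 + 3 + 1 = 9,
# counting ordered sequences (1+2+2 and 2+1+2 are distinct), matching the
# value printed below.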
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase :Dict = 3
lowerCamelCase :int = 5
lowerCamelCase :Tuple = [1, 2, 5]
print(combination_sum_iv(n, array, target)) | 686 |
'''simple docstring'''
import os
import sys
import unittest
lowerCamelCase :Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
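# The three helpers imported above introspect a test module and build
# test-class -> tester-class, model-class -> test-class and
# model-class -> tester-class mappings; the cases below pin down the expected
# mappings for BERT and BLIP.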
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Tuple = get_test_to_tester_mapping(lowercase )
A_ : Union[str, Any] = get_test_to_tester_mapping(lowercase )
A_ : Union[str, Any] = {"""BertModelTest""": """BertModelTester"""}
A_ : Union[str, Any] = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def _a (self ):
A_ : Optional[Any] = get_model_to_test_mapping(lowercase )
A_ : List[str] = get_model_to_test_mapping(lowercase )
A_ : Dict = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
A_ : Any = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def _a (self ):
A_ : List[Any] = get_model_to_tester_mapping(lowercase )
A_ : Optional[int] = get_model_to_tester_mapping(lowercase )
A_ : Dict = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
A_ : Dict = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) | 686 | 1 |
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9
    )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6
    )

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
import unittest

import numpy as np

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    from transformers.models.bert.modeling_flax_bert import (
        FlaxBertForMaskedLM,
        FlaxBertForMultipleChoice,
        FlaxBertForNextSentencePrediction,
        FlaxBertForPreTraining,
        FlaxBertForQuestionAnswering,
        FlaxBertForSequenceClassification,
        FlaxBertForTokenClassification,
        FlaxBertModel,
    )


class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
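# Usage sketch (an assumption: this file is the `transformers/models/reformer/__init__.py`
# subpackage, so attributes are resolved lazily on first access through `_LazyModule`):
#
#     from transformers import ReformerConfig, ReformerModel
#     config = ReformerConfig()
#     model = ReformerModel(config)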
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
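# Hand-checked examples (added for illustration): 13 = 2**2 + 3**2 is the only
# way to write 13 as a sum of distinct squares, and 10 = 1**2 + 3**2 likewise:
# >>> solve(13, 2)
# 1
# >>> solve(10, 2)
# 1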
import os
import unittest

from huggingface_hub.utils import are_progress_bars_disabled

import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar


class LoggingLevelTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        root_logger = logging.logging.getLogger()
        with CaptureLogger(root_logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
"""simple docstring"""
a =[0, 2, 4, 6, 8]
a =[1, 3, 5, 7, 9]
def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]:
'''simple docstring'''
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
lowerCamelCase__ =0
for digit in range(10 ):
lowerCamelCase__ =digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , snake_case__ , snake_case__ )
return result
lowerCamelCase__ =0
for digita in range(10 ):
lowerCamelCase__ =digita
if (remainder + digita) % 2 == 0:
lowerCamelCase__ =ODD_DIGITS
else:
lowerCamelCase__ =EVEN_DIGITS
for digita in other_parity_digits:
lowerCamelCase__ =digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , snake_case__ , snake_case__ , )
return result
def lowerCamelCase_ ( __lowerCAmelCase = 9 ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase__ =0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(snake_case__ , 0 , [0] * length , snake_case__ )
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
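# Hand check (added for illustration): Project Euler 145 reports 608720
# reversible numbers below 10**9, so solution() with the default max_power=9
# should return that value.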
import logging
import os
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)

from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    load_checkpoint_in_model,
    offload_weight,
    set_module_tensor_to_device,
)


if is_bnb_available():
    import bitsandbytes as bnb

from copy import deepcopy


logger = logging.getLogger(__name__)


def load_and_quantize_model(
    model: torch.nn.Module,
    bnb_quantization_config: BnbQuantizationConfig,
    weights_location: Union[str, os.PathLike] = None,
    device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None,
    no_split_module_classes: Optional[List[str]] = None,
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_state_dict: bool = False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model

    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )

    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True

        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])

        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)


def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n "
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map


def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []

    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model


def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def get_keys_to_not_convert(model):
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names


def has_4bit_bnb_layers(model):
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter):
    return next(parameter.parameters()).device


def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
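# Usage sketch (hypothetical model class and paths; mirrors accelerate's public
# quantization API as sketched above, not a verified end-to-end recipe):
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig
#
#     with init_empty_weights():
#         empty_model = MyModel()  # placeholder: any torch.nn.Module definition
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     model = load_and_quantize_model(
#         empty_model, bnb_config, weights_location="path/to/checkpoint", device_map="auto"
#     )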
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
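# Quick usage note (added for illustration): the derived property above is the
# product of the conv strides, so with the defaults it evaluates to
# 5 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 == 320:
#
#     config = SEWDConfig()
#     print(config.inputs_to_logits_ratio)  # 320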
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
from __future__ import annotations

import bisect


def bisect_left(
    sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1
) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(
    sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1
) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(
    sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1
) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(
    sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1
) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(
    sorted_collection: list[int], item: int, left: int, right: int
) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
from ...processing_utils import ProcessorMixin


class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
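# Usage sketch (the checkpoint name is an assumption; any SpeechT5 checkpoint
# with a matching feature extractor and tokenizer should work):
#
#     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#     inputs = processor(text="Hello, world!", return_tensors="pt")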
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
__snake_case = torch.load(SCREAMING_SNAKE_CASE , map_location="cpu" )
__snake_case = chkpt["model"]
# We have the base model one level deeper than the original XLM repository
__snake_case = {}
for k, v in state_dict.items():
if "pred_layer" in k:
__snake_case = v
else:
__snake_case = v
__snake_case = chkpt["params"]
__snake_case = {n: v for n, v in config.items() if not isinstance(SCREAMING_SNAKE_CASE , (torch.FloatTensor, numpy.ndarray) )}
__snake_case = chkpt["dico_word2id"]
__snake_case = {s + "</w>" if s.find("@@" ) == -1 and i > 13 else s.replace("@@" , "" ): i for s, i in vocab.items()}
# Save pytorch-model
__snake_case = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
__snake_case = pytorch_dump_folder_path + "/" + CONFIG_NAME
__snake_case = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE , indent=2 ) + "\n" )
print(F'''Save vocab file to {pytorch_config_dump_path}''' )
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE , indent=2 ) + "\n" )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
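# Example invocation (script file name and both paths are hypothetical):
#
#     python convert_xlm_original_checkpoint_to_pytorch.py \
#         --xlm_checkpoint_path ./xlm_checkpoint.pth \
#         --pytorch_dump_folder_path ./converted_model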
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
snake_case = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
snake_case = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
snake_case = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Comet(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 62 |
"""simple docstring"""
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor relative to the meter
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
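
# Illustrative conversions, derived from the exponent table above:
#   length_conversion(4, "meter", "kilometer")     -> 0.004
#   length_conversion(1, "kilometer", "megametre") -> 0.001
#   length_conversion(3, "gigametre", "meter")     -> 3000000000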
if __name__ == "__main__":
from doctest import testmod
testmod()
| 110 | 0 |
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: return the longest palindromic substring of `input_string`
    in linear time.
    """
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append the last character
    new_input_string += input_string[-1]

    # we will store the start and end of the previously found furthest-ending
    # palindromic substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of the palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find the corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
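
# Worked example (illustrative): for the input "abbbaba" the transformed string is
# "a|b|b|b|a|b|a" and the longest palindromic substring recovered is "abbba".
# print(palindromic_string("abbbaba"))  # -> abbba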
if __name__ == "__main__":
import doctest
doctest.testmod() | 665 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6], )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
        expected_encoding = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='microsoft/speecht5_asr', revision='c5ef64c71905caeccde0e4462ef3f9077224c524', sequences=sequences, ) | 665 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, 'schedule.bin')
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
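
# For example, with `get_constant_schedule` at lr=10.0 and num_steps=10, `unwrap_schedule`
# returns [10.0] * 10 -- this is exactly the first expected-value row in the test below.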
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w], lr=1e-2, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False, )
        for _ in range(1_000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {'num_warmup_steps': 2, 'num_training_steps': 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1e-2, msg=f"failed for {scheduler_func} in normal scheduler", )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """Wrap the lr lambdas of a LambdaLR schedule so the schedule can be pickled."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(self, scheduler):
        scheduler.lr_lambdas = list(map(self, scheduler.lr_lambdas)) | 564 | """simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX: The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.

        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK)
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir, )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens)
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])), )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])), )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")
def lowercase ( self : Dict ) -> List[Any]:
pass
def lowercase ( self : Tuple ) -> Dict:
pass
def lowercase ( self : Optional[int] ) -> List[str]:
pass
def lowercase ( self : int ) -> Optional[Any]:
pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(string, str)
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))
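
# For reference: sigmoid_function(0) == 0.5, and the output approaches 1 (resp. 0)
# for large positive (resp. negative) z.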
def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
# here alpha is the learning rate, x is the feature matrix, y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
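
# A single gradient-descent step above, in vector form:
#   theta <- theta - alpha * X.T @ (sigmoid(X @ theta) - y) / m
# where m = y.size is the number of training examples.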
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
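
    # The 0.5 contour drawn above is the decision boundary: points whose predicted
    # probability is at least 0.5 fall on the class-1 side of the line.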
    plt.legend()
    plt.show()
| 72 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."


if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
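
# Expected format of utils/documentation_tests.txt (illustrative paths): one
# repo-relative path per line, sorted alphabetically, e.g.
#   docs/source/en/quicktour.md
#   src/transformers/models/bert/modeling_bert.py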
| 72 | 1 |
""" WavLM model configuration"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of a WavLM model.
    """

    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1_500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
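
# Minimal usage sketch (illustrative): with the default conv_stride above, the
# inputs_to_logits_ratio is 5 * 2**6 == 320 raw audio samples per output frame.
# config = WavLMConfig()
# assert config.inputs_to_logits_ratio == 320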
| 5 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" FNet tokenizer, backed by HuggingFace's *tokenizers* library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
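
    # For example (illustrative): a single sequence is laid out as `[CLS] A [SEP]`,
    # and a pair as `[CLS] A [SEP] B [SEP]`, mirroring the token type ids built below.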
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,) | 557 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        """
        Decode the (char, bpe, wordpiece) logits into strings and keep, per sample,
        the candidate string with the highest confidence score.
        """
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
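
    # Illustrative voting example: per-sample confidences such as (char=0.92, bpe=0.61,
    # wp=0.70) would select the char head's string, since max([0.92, 0.61, 0.70]) is
    # the char score.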
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores
    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 709 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding="VALID", groups=groups, use_bias=False, name="convolution", )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """
    RegNet embeddings (stem): a single aggressive convolution.
    """

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name="embedder", )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration.")

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """
    RegNet shortcut: projects the residual features to the correct size and, if needed,
    downsamples the input using `stride=2`.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution")
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """
    Squeeze-and-Excitation (SE) layer: https://arxiv.org/abs/1709.01507
    """

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class _snake_case ( tf.keras.layers.Layer ):
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1 , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : int = in_channels != out_channels or stride != 1
lowercase__ : Any = max(1 , out_channels // config.groups_width)
lowercase__ : List[Any] = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name="""shortcut""")
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""")
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase__ : Optional[int] = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name="""layer.0"""),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name="""layer.1"""),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name="""layer.2"""),
]
lowercase__ : Tuple = ACTaFN[config.hidden_act]
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Tuple = hidden_state
for layer_module in self.layers:
lowercase__ : Any = layer_module(SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = self.shortcut(SCREAMING_SNAKE_CASE_)
hidden_state += residual
lowercase__ : Any = self.activation(SCREAMING_SNAKE_CASE_)
return hidden_state
class _snake_case ( tf.keras.layers.Layer ):
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1 , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : Any = in_channels != out_channels or stride != 1
lowercase__ : str = max(1 , out_channels // config.groups_width)
lowercase__ : int = (
TFRegNetShortCut(SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name="""shortcut""")
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" , name="""shortcut""")
)
lowercase__ : Any = [
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=config.hidden_act , name="""layer.0"""),
TFRegNetConvLayer(
SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , groups=SCREAMING_SNAKE_CASE_ , activation=config.hidden_act , name="""layer.1"""),
TFRegNetSELayer(SCREAMING_SNAKE_CASE_ , reduced_channels=int(round(in_channels / 4)) , name="""layer.2"""),
TFRegNetConvLayer(SCREAMING_SNAKE_CASE_ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE_ , name="""layer.3"""),
]
lowercase__ : Union[str, Any] = ACTaFN[config.hidden_act]
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Union[str, Any] = hidden_state
for layer_module in self.layers:
lowercase__ : Optional[Any] = layer_module(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = self.shortcut(SCREAMING_SNAKE_CASE_)
hidden_state += residual
lowercase__ : Optional[int] = self.activation(SCREAMING_SNAKE_CASE_)
return hidden_state
class _snake_case ( tf.keras.layers.Layer ):
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 2 , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : Any = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
lowercase__ : Dict = [
# downsampling is done in the first layer with stride of 2
layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , stride=SCREAMING_SNAKE_CASE_ , name="""layers.0"""),
*[layer(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , name=f'layers.{i+1}') for i in range(depth - 1)],
]
def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
for layer_module in self.layers:
lowercase__ : Tuple = layer_module(SCREAMING_SNAKE_CASE_)
return hidden_state
class _snake_case ( tf.keras.layers.Layer ):
def __init__( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : Tuple = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
SCREAMING_SNAKE_CASE_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ))
lowercase__ : List[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:])
for i, ((in_channels, out_channels), depth) in enumerate(zip(SCREAMING_SNAKE_CASE_ , config.depths[1:])):
self.stages.append(TFRegNetStage(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , depth=SCREAMING_SNAKE_CASE_ , name=f'stages.{i+1}'))
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = True):
'''simple docstring'''
lowercase__ : Any = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase__ : List[Any] = hidden_states + (hidden_state,)
lowercase__ : str = stage_module(SCREAMING_SNAKE_CASE_)
if output_hidden_states:
lowercase__ : Optional[int] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return TFBaseModelOutputWithNoAttention(last_hidden_state=SCREAMING_SNAKE_CASE_ , hidden_states=SCREAMING_SNAKE_CASE_)
@keras_serializable
class _snake_case ( tf.keras.layers.Layer ):
__lowerCAmelCase : List[Any] = RegNetConfig
def __init__( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = config
lowercase__ : List[Any] = TFRegNetEmbeddings(SCREAMING_SNAKE_CASE_ , name="""embedder""")
lowercase__ : Any = TFRegNetEncoder(SCREAMING_SNAKE_CASE_ , name="""encoder""")
lowercase__ : Optional[int] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=SCREAMING_SNAKE_CASE_ , name="""pooler""")
@unpack_inputs
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , ):
'''simple docstring'''
lowercase__ : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : str = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Any = self.embedder(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_)
lowercase__ : Any = self.encoder(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = encoder_outputs[0]
lowercase__ : Tuple = self.pooler(SCREAMING_SNAKE_CASE_)
# Change to NCHW output format have uniformity in the modules
lowercase__ : Dict = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2))
lowercase__ : Optional[Any] = tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2))
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase__ : Optional[int] = tuple([tf.transpose(SCREAMING_SNAKE_CASE_ , perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE_ , pooler_output=SCREAMING_SNAKE_CASE_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : Dict = RegNetConfig
__lowerCAmelCase : Any = 'regnet'
__lowerCAmelCase : List[str] = 'pixel_values'
@property
def lowercase__ ( self):
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa)}
lowerCamelCase__ : str = R"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCamelCase__ : Dict = R"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , UpperCAmelCase_ , )
class _snake_case ( UpperCAmelCase_ ):
def __init__( self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : Any = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name="""regnet""")
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_=False , ):
'''simple docstring'''
lowercase__ : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Dict = self.regnet(
pixel_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , UpperCAmelCase_ , )
class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ ):
def __init__( self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
lowercase__ : int = config.num_labels
lowercase__ : Optional[int] = TFRegNetMainLayer(SCREAMING_SNAKE_CASE_ , name="""regnet""")
# classification head
lowercase__ : Any = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="""classifier.1""") if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE_)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_=False , ):
'''simple docstring'''
lowercase__ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : List[str] = self.regnet(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = outputs.pooler_output if return_dict else outputs[1]
lowercase__ : Optional[int] = self.classifier[0](SCREAMING_SNAKE_CASE_)
lowercase__ : List[str] = self.classifier[1](SCREAMING_SNAKE_CASE_)
lowercase__ : str = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_)
if not return_dict:
lowercase__ : Optional[int] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states)
| 495 | 0 |
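A minimal smoke test for the RegNet classes restored above, which mirror the TF RegNet port in `transformers`. It assumes `transformers` and TensorFlow are installed; `num_labels=10` and the random input are illustrative choices, not values from the original file, and no pretrained weights are downloaded.

import tensorflow as tf
from transformers import RegNetConfig, TFRegNetForImageClassification

config = RegNetConfig(num_labels=10)          # small, randomly initialized model
model = TFRegNetForImageClassification(config)

# RegNet expects NCHW pixel values: (batch, channels, height, width)
pixel_values = tf.random.uniform((1, config.num_channels, 224, 224))
outputs = model(pixel_values)
print(outputs.logits.shape)                   # (1, 10)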
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # Mean-pool the token embeddings, masking out padding positions
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
| 371 |
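A toy illustration of the masked mean pooling used in `MultilingualCLIP.forward` above, with hand-made tensors so it runs without any pretrained weights; the shapes and mask values are arbitrary examples.

import torch

embs = torch.ones(2, 4, 8)                    # (batch, seq_len, hidden)
attention_mask = torch.tensor([[1, 1, 1, 0],  # second sequence is shorter
                               [1, 1, 0, 0]])

# Zero out padding positions, then divide by the number of real tokens
pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
print(pooled.shape)  # torch.Size([2, 8]); every value is 1.0 here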
import inspect
import unittest


class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # Map module-style backend names to their pip package names
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 371 | 1 |
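A rough sketch of the convention the test above relies on: each dummy object in diffusers records the backends it needs in a `_backends` attribute, and module-style names are mapped to pip package names before the lookup. The class name and version pins below are hypothetical, not taken from the library.

class DummyPipeline:  # hypothetical stand-in for a diffusers dummy object
    _backends = ["torch", "transformers"]

deps = {"torch": "torch>=1.4", "transformers": "transformers>=4.25.1"}  # hypothetical pins
for backend in DummyPipeline._backends:
    assert backend in deps, f"{backend} is not in the deps table!"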
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 713 |
"""Dimensionality reduction with Principal Component Analysis (PCA) and Linear Discriminant Analysis (LDA)."""
import logging

import numpy as np
import pytest
from scipy.linalg import eigh

logging.basicConfig(level=logging.INFO, format="%(message)s")


def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project the dataset onto its first `dimensions` principal components."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first `dimensions`
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the dataset on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project the dataset onto `dimensions` discriminant directions."""
    # The number of classes must exceed the target dimensionality
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create a dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError("Did not raise AssertionError for dimensions > classes")
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 532 | 0 |
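A small worked example for `principal_component_analysis` defined above, assuming the (features x samples) layout it expects; the 3x3 matrix is an arbitrary illustration (the same one the test uses).

import numpy as np

features = np.array([[1.0, 2.0, 3.0],
                     [4.0, 5.0, 6.0],
                     [7.0, 8.0, 9.0]])   # 3 features, 3 samples
projected = principal_component_analysis(features, dimensions=2)
print(projected.shape)  # (2, 3): two principal components per sample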
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 36 |
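The `_LazyModule` used above defers the heavy submodule imports until a name is first accessed. Below is a minimal sketch of that pattern using only the standard library; `transformers` ships a more complete implementation, and the structure here is illustrative only.

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name back to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, name):
        # Triggered only on a cache miss: import the submodule lazily,
        # then forward the attribute lookup to it.
        module = importlib.import_module("." + self._class_to_module[name], self.__name__)
        return getattr(module, name)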
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 | 0 |
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Return all rotations of the string `s`."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Apply the Burrows-Wheeler transform to `s`."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Reverse the Burrows-Wheeler transform."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
| 702 |
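A worked example for `bwt_transform` and `reverse_bwt` above; the input "banana" is an arbitrary illustration. The transform sorts the six rotations of "banana", records the last column, and remembers where the original string landed.

result = bwt_transform("banana")
print(result)  # {'bwt_string': 'nnbaaa', 'idx_original_string': 3}
print(reverse_bwt(result["bwt_string"], result["idx_original_string"]))  # banana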
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 669 | 0 |
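A quick check of the dynamic ONNX axes defined above. It assumes an installed `transformers` where `RobertaOnnxConfig` lives at this module path and that "sequence-classification" is one of the library's standard ONNX task names; no model weights are needed.

from transformers import RobertaConfig
from transformers.models.roberta.configuration_roberta import RobertaOnnxConfig

config = RobertaConfig()
onnx_config = RobertaOnnxConfig(config, task="sequence-classification")
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])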