import argparse
import os

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between `start_prompt` and `end_prompt`, and return it with its bounding indices."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim empty lines at both ends of the block.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This dict contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    """Return the list of models supporting a given task, formatted as Markdown links to their doc pages."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Compare the auto-generated model list in a task guide against the current mappings, fixing it if `overwrite`."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
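
# Illustrative sketch (not part of the script): how `_find_text_in_file` locates the
# auto-generated block. The HTML comment prompts are the real ones used above; the guide
# content and the temporary file are hypothetical.
import tempfile

_doc = (
    "# Image classification\n"
    "<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->\n"
    "[BEiT](../model_doc/beit), [ViT](../model_doc/vit)\n"
    "<!--End of the generated tip-->\n"
)
with tempfile.NamedTemporaryFile("w", suffix=".md", delete=False) as _tmp:
    _tmp.write(_doc)

_text, _start, _end, _lines = _find_text_in_file(
    filename=_tmp.name,
    start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
    end_prompt="<!--End of the generated tip-->",
)
print(_text)  # -> "[BEiT](../model_doc/beit), [ViT](../model_doc/vit)\n"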
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dict data files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    "You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    "Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
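
# Usage sketch: this builder is what `datasets.load_dataset("json", ...)` dispatches to.
# The file names and the "records" field below are hypothetical placeholders.
from datasets import load_dataset

# One JSON object per line (JSON Lines), parsed in chunks through pyarrow.json:
ds = load_dataset("json", data_files="data.jsonl")
# A single JSON document whose rows live under one key, read via `field`:
ds = load_dataset("json", data_files="data.json", field="records")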
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    """Return a random primitive root modulo the prime `p_val`."""
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int):
    """Generate an ElGamal key pair: public (key_size, e_1, e_2, p) and private (key_size, d)."""
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    """Write a fresh key pair to `{name}_pubkey.txt` and `{name}_privkey.txt`, refusing to overwrite."""
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
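
# Sanity-check sketch of the key relation with toy numbers (not secure parameters):
# e_2 is the modular inverse of e_1**d mod p, so (e_1**d) * e_2 == 1 (mod p).
_p, _e_1, _d = 23, 5, 7
_e_2 = pow(pow(_e_1, _d, _p), -1, _p)  # Python 3.8+ modular inverse
assert (pow(_e_1, _d, _p) * _e_2) % _p == 1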
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    """
    Count the square laminae (square frames with a centred square hole) that can be
    formed using up to `limit` tiles: a lamina with outer width n and hole width m
    (same parity, 1 <= m <= n - 2) uses n**2 - m**2 tiles.
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # The hole width must have the same parity as the outer width.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
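
# Illustrative brute-force cross-check for small limits (a sketch, not part of the solution):
# count pairs (outer, hole) of equal parity with hole >= 1 and outer**2 - hole**2 <= limit.
def _brute_force(limit: int) -> int:
    count = 0
    outer = 3
    while outer**2 - (outer - 2) ** 2 <= limit:  # thinnest lamina still fits
        hole = outer - 2
        while hole >= 1 and outer**2 - hole**2 <= limit:
            count += 1
            hole -= 2
        outer += 1
    return count


assert _brute_force(1_000) == solution(1_000)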
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest

from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError

from .utils import require_pil


class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_image(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])


def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_write_with_invalid_key(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)


def get_base_dtype(arr_type):
    """Recursively unwrap list types to get the primitive Arrow dtype."""
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    """Replace the first primitive element of an arbitrarily nested list in place."""
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value


@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype


@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()


@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed


def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)


def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}


@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def test_always_nullable():
    # Even if the inferred schema marks a field as non-nullable, the writer normalizes it to nullable.
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])

    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)

    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
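
# Standalone sketch of the ArrowWriter pattern exercised above: write rows to an
# in-memory Arrow stream, then read the table back (names match the tests, nothing new).
import pyarrow as pa

from datasets.arrow_writer import ArrowWriter

_output = pa.BufferOutputStream()
with ArrowWriter(stream=_output) as _writer:
    _writer.write({"col_1": "foo", "col_2": 1})
    _writer.write({"col_1": "bar", "col_2": 2})
    _num_examples, _num_bytes = _writer.finalize()

_table = pa.ipc.open_stream(pa.BufferReader(_output.getvalue())).read_all()
print(_table.to_pydict())  # {'col_1': ['foo', 'bar'], 'col_2': [1, 2]}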
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker for one list position: exchange values with neighbors for n phases, then report the result."""
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort `arr` with one process per element, exchanging values through pipes."""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
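
# For reference, a sequential odd-even transposition sort: the same compare-exchange
# schedule as above, without processes or pipes (a sketch useful for checking results).
def odd_even_transposition_sequential(arr: list) -> list:
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        start = 0 if phase % 2 == 0 else 1
        for i in range(start, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))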
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return the word's letters, sorted: anagrams share the same signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every anagram of the given word."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
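
# Illustrative sketch of the signature-based grouping on an in-memory word list
# (hypothetical words, independent of words.txt):
_by_sig = collections.defaultdict(list)
for _w in ["listen", "silent", "enlist", "google", "banana"]:
    _by_sig[signature(_w)].append(_w)
print(_by_sig[signature("listen")])  # ['listen', 'silent', 'enlist']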
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
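
# Minimal usage sketch, assuming the class above is exposed as `transformers.UMT5Config`
# (as in recent transformers releases):
from transformers import UMT5Config

config = UMT5Config(d_model=256, num_layers=4, num_heads=4)
print(config.hidden_size)         # 256 - aliased to d_model via the property
print(config.num_decoder_layers)  # 4 - defaults to num_layers (symmetry)
print(config.dense_act_fn)        # "gelu_new" - derived from "gated-gelu"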
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.plbart.modeling_plbart import shift_tokens_right

EN_CODE = 50003
PYTHON_CODE = 50002


@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )

    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )


@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
        134,
        5452,
        33460,
        33441,
        33463,
        33465,
        33463,
        33449,
        988,
        20,
        33456,
        19,
        33456,
        771,
        39,
        4258,
        889,
        3318,
        33441,
        33463,
        33465,
        33463,
        33449,
        2471,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
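
# Minimal usage sketch for the processor, assuming the public
# "google/owlvit-base-patch32" checkpoint and a hypothetical local image:
from PIL import Image

from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.open("cats.png")  # hypothetical local file
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
print(inputs.keys())  # input_ids, attention_mask, pixel_values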
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
def A ( _lowercase , _lowercase=False , _lowercase=False ):
SCREAMING_SNAKE_CASE : List[str] = '''backbone.''' if is_semantic else ''''''
SCREAMING_SNAKE_CASE : List[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", '''beit.embeddings.cls_token'''),
(f"""{prefix}patch_embed.proj.weight""", '''beit.embeddings.patch_embeddings.projection.weight'''),
(f"""{prefix}patch_embed.proj.bias""", '''beit.embeddings.patch_embeddings.projection.bias'''),
(f"""{prefix}pos_embed""", '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def A ( _lowercase , _lowercase , _lowercase=False , _lowercase=False ):
for i in range(config.num_hidden_layers ):
SCREAMING_SNAKE_CASE : int = '''backbone.''' if is_semantic else ''''''
# queries, keys and values
SCREAMING_SNAKE_CASE : str = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
SCREAMING_SNAKE_CASE : str = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )
SCREAMING_SNAKE_CASE : int = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE : List[str] = q_bias
SCREAMING_SNAKE_CASE : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE : int = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
SCREAMING_SNAKE_CASE : str = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
SCREAMING_SNAKE_CASE : Optional[int] = gamma_a
SCREAMING_SNAKE_CASE : str = gamma_a
def A ( _lowercase , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : str = dct.pop(_lowercase )
SCREAMING_SNAKE_CASE : str = val
def A ( ):
SCREAMING_SNAKE_CASE : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
SCREAMING_SNAKE_CASE : Any = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original DiT weights into our BEiT structure.
    """
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
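

# Hedged illustration (not part of the original script): a minimal sketch of how
# rename_key rewires a checkpoint dict; the toy key below is hypothetical.
def _rename_key_demo():
    toy_state = {"blocks.0.norm2.weight": 1.0}
    rename_key(toy_state, "blocks.0.norm2.weight", "beit.encoder.layer.0.layernorm_after.weight")
    assert "blocks.0.norm2.weight" not in toy_state
    assert toy_state["beit.encoder.layer.0.layernorm_after.weight"] == 1.0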
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, additional_special_tokens=None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
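

# Hedged usage sketch (assumes network access to the public facebook/mbart-large-en-ro
# checkpoint; not part of the original module):
#
#     from transformers import MBartTokenizer
#
#     tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#     batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#     # input_ids end with [..., eos_token_id, <en_XX code>], mirroring
#     # set_src_lang_special_tokens above.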
'''simple docstring'''
def and_gate(input_1: int, input_2: int) -> int:
    """Calculate AND of the input values; the output is 0 if either input is 0."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Tests the and_gate function against its full truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
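

# Hedged extension sketch (not in the original file): other gates compose from and_gate
# in the same truth-table style; nand_gate here is a hypothetical helper.
def nand_gate(input_1: int, input_2: int) -> int:
    return int(not and_gate(input_1, input_2))


def test_nand_gate() -> None:
    assert nand_gate(1, 1) == 0
    assert nand_gate(0, 1) == 1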
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)

    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )

    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        eigenvalues, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first ones
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the dataset on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int) -> np.ndarray:
    assert classes > dimensions

    # Check if features have already been loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
        assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
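

# Hedged demo (not part of the original test-suite): projecting the 3-feature dummy
# dataset used above onto its first two principal components.
def _pca_demo() -> None:
    features = np.array([[1.0, 2.0, 3.0, 4.0, 5.0], [2.0, 3.0, 4.0, 5.0, 6.0], [3.0, 4.0, 5.0, 6.0, 7.0]])
    projected = principal_component_analysis(features, dimensions=2)
    assert projected.shape == (2, features.shape[1])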
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
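

# Hedged usage sketch (assumes the public microsoft/deberta-base checkpoint; not part
# of the original test-suite):
#
#     from transformers import AutoTokenizer, DebertaModel
#
#     tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
#     model = DebertaModel.from_pretrained("microsoft/deberta-base").eval()
#     inputs = tokenizer("Hello world", return_tensors="pt")
#     with torch.no_grad():
#         hidden = model(**inputs).last_hidden_state  # (batch, seq_len, hidden_size)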
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
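

# Hedged note (not part of the original init): with the _LazyModule pattern above,
# importing this package stays cheap; heavy submodules are only materialized when an
# attribute is first accessed, e.g. (illustrative):
#
#     from transformers.models.encodec import EncodecConfig  # no torch import yet
#     config = EncodecConfig()  # resolves configuration_encodec lazily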
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
@require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
@require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
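

# Hedged usage sketch (synthetic waveform; shapes follow the integration test above):
#
#     import numpy as np
#     from transformers import ASTFeatureExtractor
#
#     fe = ASTFeatureExtractor()
#     waveform = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz
#     feats = fe(waveform, sampling_rate=16000, return_tensors="np")
#     # input_values is a (1, 1024, 128) log-mel spectrogram, padded/truncated to 1024 frames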
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    r"""
    Unconditional image generation pipeline using the variance-exploding SDE sampler.

    Parameters:
        unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
        scheduler ([`ScoreSdeVeScheduler`]): scheduler used in combination with `unet` to denoise the image.
    """

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2_000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
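

# Hedged usage sketch (not part of the original module; the checkpoint id is an
# assumption, any ScoreSdeVe-compatible checkpoint works the same way):
#
#     from diffusers import ScoreSdeVePipeline
#
#     pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#     image = pipe(num_inference_steps=2000).images[0]
#     image.save("sde_ve_sample.png")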
def heaps(arr: list) -> list:
    """
    Pure python implementation of Heap's algorithm,
    returning all permutations of a list.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
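

# Quick sanity check (not in the original file): Heap's algorithm yields all n! orderings.
def _heaps_demo() -> None:
    perms = heaps([1, 2, 3])
    assert len(perms) == 6
    assert set(perms) == {(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)}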
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def parameterized_custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results, so all we check for
        # now is that the process didn't fail
        pass

    def run_and_check(self, stage: str, model: str, eval_steps: int = 10, distributed: bool = True, fp16: bool = True, quality_checks: bool = True):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(self, stage: str, model_name: str, eval_steps: int = 10, num_train_epochs: int = 1, distributed: bool = True, fp16: bool = True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()
        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
'''simple docstring'''
from __future__ import annotations
arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """
    Get the Next Greatest Element (NGE) for each element in the array
    by checking all elements to the right of the current one.
    """
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """
    Like next_greatest_element_slow but uses enumerate and a slice
    of arr for the inner loop.
    """
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """
    Get the Next Greatest Element (NGE) for each element in the array
    in a single pass, maintaining a monotonic stack of candidates.
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
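

# Hedged note (not in the original file): the stack-based next_greatest_element is O(n)
# amortized, since each element is pushed and popped at most once, versus the O(n^2)
# nested loops of the slow/fast variants.
def _nge_demo() -> None:
    assert next_greatest_element([2, 1, 3]) == [3, 3, -1]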
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    """
    Creates the cache directory for modules with an init, and adds it to the Python path.
    """
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    """
    Creates a dynamic module in the cache directory for modules.
    """
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    """
    Get the list of modules that are relatively imported in a module file.
    """
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    """
    Get the list of all files that are needed for a given module.
    """
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    """
    Check if the current Python environment contains all the libraries that are imported in a file.
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    """
    Import a module on the cache directory for modules and extract a class from it.
    """
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """
    Retrieve the pipeline class that inherits from `DiffusionPipeline`. Note that there has to be exactly one
    class inheriting from `DiffusionPipeline`.
    """
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False):
    """
    Prepares and downloads a module from a local folder or a repo, and returns its path
    inside the cached dynamic-modules directory.
    """
    # Download and cache module_file from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, f"{module_needed}.py", cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, class_name: Optional[str] = None, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, **kwargs):
    """
    Extracts a class from a module file, present in the local folder or in the repository of a model.
    """
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
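

# Hedged usage sketch (not part of the original module; the repo id, file and class
# names below are placeholders, not real artifacts):
#
#     pipeline_class = get_class_from_dynamic_module(
#         "some-user/custom-pipeline-repo",
#         module_file="my_pipeline.py",
#         class_name="MyPipeline",
#     )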
| 75
| 1
|
'''simple docstring'''
def __UpperCAmelCase ( a_: int ):
    _UpperCAmelCase : Union[str, Any] = int(a_ )
    if n_element < 1:
        _UpperCAmelCase : Dict = ValueError("n_element should be a positive number" )
        raise my_error
_UpperCAmelCase : Dict = [1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = (0, 0, 0)
_UpperCAmelCase : List[str] = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
__a = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
__a = hamming(int(n))
print('-----------------------------------------------------')
print(f'The list with nth numbers is: {hamming_numbers}')
print('-----------------------------------------------------')
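# A minimal, de-obfuscated sketch of the same three-pointer technique, assuming
# the standard Hamming definition (numbers of the form 2^i * 3^j * 5^k):
def hamming_sketch(n: int) -> list:
    h = [1]
    i = j = k = 0
    while len(h) < n:
        nxt = min(h[i] * 2, h[j] * 3, h[k] * 5)
        h.append(nxt)
        # advance every pointer that produced the new value (this also skips duplicates)
        if nxt == h[i] * 2:
            i += 1
        if nxt == h[j] * 3:
            j += 1
        if nxt == h[k] * 5:
            k += 1
    return h
assert hamming_sketch(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]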
| 371
|
'''simple docstring'''
from math import factorial
def __UpperCAmelCase ( a_: int = 100 ):
    return sum(map(int, str(factorial(a_ ) ) ) )
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
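# Worked check of the digit-sum-of-factorial idea above, on a smaller input:
# 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
from math import factorial
assert sum(int(digit) for digit in str(factorial(10))) == 27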
| 17
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class A ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
A = "switch_transformers"
A = ["past_key_values"]
A = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__(self , _UpperCAmelCase=3_2_1_2_8 , _UpperCAmelCase=7_6_8 , _UpperCAmelCase=6_4 , _UpperCAmelCase=2_0_4_8 , _UpperCAmelCase=6_4 , _UpperCAmelCase=1_2 , _UpperCAmelCase=3 , _UpperCAmelCase=1_2 , _UpperCAmelCase=3 , _UpperCAmelCase=1_2 , _UpperCAmelCase=8 , _UpperCAmelCase=False , _UpperCAmelCase=0.01 , _UpperCAmelCase="float32" , _UpperCAmelCase=False , _UpperCAmelCase=3_2 , _UpperCAmelCase=1_2_8 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1E-6 , _UpperCAmelCase=0.001 , _UpperCAmelCase=0.001 , _UpperCAmelCase=1.0 , _UpperCAmelCase="relu" , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0 , _UpperCAmelCase=1 , **_UpperCAmelCase , ) -> int:
__UpperCamelCase : List[Any] = vocab_size
__UpperCamelCase : List[str] = d_model
__UpperCamelCase : Any = d_kv
__UpperCamelCase : Optional[int] = d_ff
__UpperCamelCase : str = num_sparse_encoder_layers
__UpperCamelCase : Dict = num_layers
__UpperCamelCase : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__UpperCamelCase : Dict = num_sparse_decoder_layers
        # This tells us how often (i.e. every how many encoder layers) a sparse layer is set.
if self.num_sparse_encoder_layers > 0:
__UpperCamelCase : Optional[int] = self.num_layers // self.num_sparse_encoder_layers
else:
__UpperCamelCase : Optional[Any] = self.num_layers # HACK: this will create 0 sparse layers
        # This tells us how often (i.e. every how many decoder layers) a sparse layer is set.
if self.num_sparse_decoder_layers > 0:
__UpperCamelCase : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__UpperCamelCase : Tuple = self.num_decoder_layers # HACK: this will create 0 sparse layers
__UpperCamelCase : int = num_heads
__UpperCamelCase : Any = num_experts
__UpperCamelCase : List[str] = expert_capacity
__UpperCamelCase : List[Any] = router_bias
__UpperCamelCase : int = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}" )
__UpperCamelCase : Optional[Any] = router_dtype
__UpperCamelCase : Optional[int] = router_ignore_padding_tokens
__UpperCamelCase : Union[str, Any] = relative_attention_num_buckets
__UpperCamelCase : List[Any] = relative_attention_max_distance
__UpperCamelCase : Optional[int] = dropout_rate
__UpperCamelCase : str = layer_norm_epsilon
__UpperCamelCase : List[Any] = initializer_factor
__UpperCamelCase : Dict = feed_forward_proj
__UpperCamelCase : List[Any] = use_cache
__UpperCamelCase : Optional[Any] = add_router_probs
__UpperCamelCase : Union[str, Any] = router_z_loss_coef
__UpperCamelCase : Dict = router_aux_loss_coef
__UpperCamelCase : Dict = self.feed_forward_proj.split("-" )
__UpperCamelCase : Union[str, Any] = act_info[-1]
__UpperCamelCase : List[str] = act_info[0] == 'gated'
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
raise ValueError(
f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"\'gated-gelu\' or \'relu\'" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__UpperCamelCase : int = 'gelu_new'
super().__init__(
pad_token_id=_a , eos_token_id=_a , is_encoder_decoder=_a , **_a , )
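# A small worked example of the sparse-step arithmetic above (values assumed):
num_layers, num_sparse_encoder_layers = 12, 3
encoder_sparse_step = num_layers // num_sparse_encoder_layers
assert encoder_sparse_step == 4  # one sparse (MoE) block every 4 encoder layers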
| 298
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ) -> np.array:
__lowerCamelCase : Any = F'{sampling_rate}'
__lowerCamelCase : List[str] = '1'
__lowerCamelCase : int = 'f32le'
__lowerCamelCase : Dict = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(_lowerCAmelCase ,stdin=subprocess.PIPE ,stdout=subprocess.PIPE ) as ffmpeg_process:
__lowerCamelCase : Tuple = ffmpeg_process.communicate(_lowerCAmelCase )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
__lowerCamelCase : Any = output_stream[0]
__lowerCamelCase : Union[str, Any] = np.frombuffer(_lowerCAmelCase ,np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
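# A standalone sketch of the same decode-through-ffmpeg idea, assuming ffmpeg is
# on PATH and "speech.wav" is a hypothetical local file. "f32le" makes ffmpeg
# emit raw little-endian float32 samples, which is why np.float32 is the right
# dtype for np.frombuffer:
import subprocess
import numpy as np
cmd = ["ffmpeg", "-i", "pipe:0", "-ac", "1", "-ar", "16000",
       "-f", "f32le", "-hide_banner", "-loglevel", "quiet", "pipe:1"]
with open("speech.wav", "rb") as f:  # hypothetical input file
    raw = subprocess.run(cmd, input=f.read(), stdout=subprocess.PIPE, check=True).stdout
audio = np.frombuffer(raw, dtype=np.float32)  # mono samples at 16 kHz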
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = "f32le" ,) -> Dict:
__lowerCamelCase : Optional[Any] = F'{sampling_rate}'
__lowerCamelCase : Optional[int] = '1'
if format_for_conversion == "s16le":
__lowerCamelCase : List[Any] = 2
elif format_for_conversion == "f32le":
__lowerCamelCase : Tuple = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
__lowerCamelCase : Any = platform.system()
if system == "Linux":
__lowerCamelCase : Tuple = 'alsa'
__lowerCamelCase : Optional[Any] = 'default'
elif system == "Darwin":
__lowerCamelCase : Union[str, Any] = 'avfoundation'
__lowerCamelCase : Tuple = ':0'
elif system == "Windows":
__lowerCamelCase : List[str] = 'dshow'
__lowerCamelCase : Optional[Any] = 'default'
__lowerCamelCase : Optional[int] = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
__lowerCamelCase : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
__lowerCamelCase : int = _ffmpeg_stream(_lowerCAmelCase ,_lowerCAmelCase )
for item in iterator:
yield item
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = "f32le" ,) -> List[str]:
if stream_chunk_s is not None:
__lowerCamelCase : int = stream_chunk_s
else:
__lowerCamelCase : List[Any] = chunk_length_s
__lowerCamelCase : Dict = ffmpeg_microphone(_lowerCAmelCase ,_lowerCAmelCase ,format_for_conversion=_lowerCAmelCase )
if format_for_conversion == "s16le":
__lowerCamelCase : List[str] = np.intaa
__lowerCamelCase : Union[str, Any] = 2
elif format_for_conversion == "f32le":
__lowerCamelCase : Union[str, Any] = np.floataa
__lowerCamelCase : Optional[Any] = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
if stride_length_s is None:
__lowerCamelCase : Any = chunk_length_s / 6
__lowerCamelCase : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(_lowerCAmelCase ,(int, float) ):
__lowerCamelCase : Tuple = [stride_length_s, stride_length_s]
__lowerCamelCase : Union[str, Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
__lowerCamelCase : Optional[Any] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
__lowerCamelCase : Dict = datetime.datetime.now()
__lowerCamelCase : Any = datetime.timedelta(seconds=_lowerCAmelCase )
for item in chunk_bytes_iter(_lowerCAmelCase ,_lowerCAmelCase ,stride=(stride_left, stride_right) ,stream=_lowerCAmelCase ):
# Put everything back in numpy scale
__lowerCamelCase : Optional[int] = np.frombuffer(item['raw'] ,dtype=_lowerCAmelCase )
__lowerCamelCase : Tuple = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
__lowerCamelCase : Optional[int] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = False ) -> str:
__lowerCamelCase : Optional[int] = b''
__lowerCamelCase ,__lowerCamelCase : Any = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}' )
__lowerCamelCase : str = 0
for raw in iterator:
acc += raw
if stream and len(_lowerCAmelCase ) < chunk_len:
__lowerCamelCase : Any = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(_lowerCAmelCase ) >= chunk_len:
# We are flushing the accumulator
__lowerCamelCase : Any = (_stride_left, stride_right)
__lowerCamelCase : Optional[int] = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
__lowerCamelCase : List[str] = False
yield item
__lowerCamelCase : Tuple = stride_left
__lowerCamelCase : Union[str, Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(_lowerCAmelCase ) > stride_left:
__lowerCamelCase : Tuple = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
__lowerCamelCase : List[str] = False
yield item
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ) -> Tuple:
    __lowerCamelCase : int = 2**24 # 16 MB
try:
with subprocess.Popen(_lowerCAmelCase ,stdout=subprocess.PIPE ,bufsize=_lowerCAmelCase ) as ffmpeg_process:
while True:
__lowerCamelCase : Union[str, Any] = ffmpeg_process.stdout.read(_lowerCAmelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
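# A minimal replica of the windowing logic in the chunk iterator above (the
# streaming flag and dict wrapper omitted), with a worked check: chunk_len=4 and
# a (1, 1) stride make consecutive windows overlap by two bytes.
def _windows(data: bytes, chunk_len: int, stride_left: int, stride_right: int):
    out, left = [], 0
    while len(data) >= chunk_len:
        out.append((data[:chunk_len], (left, stride_right)))
        left = stride_left
        data = data[chunk_len - stride_left - stride_right :]
    if len(data) > stride_left:  # trailing partial chunk, nothing on its right
        out.append((data, (left, 0)))
    return out
assert _windows(b"0123456789", 4, 1, 1) == [
    (b"0123", (0, 1)), (b"2345", (1, 1)), (b"4567", (1, 1)),
    (b"6789", (1, 1)), (b"89", (1, 0)),
]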
| 208
| 0
|
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowerCamelCase__ ( __lowerCamelCase : str ):
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __magic_name__ ( nn.Module ):
def __init__( self , snake_case , snake_case) -> Dict:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : List[str] =module
_UpperCAmelCase : Optional[int] =nn.Sequential(
nn.Linear(module.in_features , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE) , nn.Linear(_SCREAMING_SNAKE_CASE , module.out_features , bias=_SCREAMING_SNAKE_CASE) , )
_UpperCAmelCase : Tuple =(2.0 / (5 * min(module.in_features , module.out_features))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=_SCREAMING_SNAKE_CASE)
nn.init.zeros_(self.adapter[1].weight)
self.adapter.to(module.weight.device)
def lowerCAmelCase ( self , snake_case , *snake_case , **snake_case) -> Tuple:
'''simple docstring'''
return self.module(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE) + self.adapter(_SCREAMING_SNAKE_CASE)
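# A minimal sketch of why the adapter above is a no-op at initialization: its
# second linear layer starts at zero, so the wrapped module's output is
# unchanged until the adapter is trained (shapes and rank are assumed):
import torch
import torch.nn as nn
base = nn.Linear(16, 16)
adapter = nn.Sequential(nn.Linear(16, 4, bias=False), nn.Linear(4, 16, bias=False))
nn.init.zeros_(adapter[1].weight)  # rank-4 bottleneck, zero-initialized output side
x = torch.randn(2, 16)
assert torch.allclose(base(x) + adapter(x), base(x))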
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __magic_name__ ( unittest.TestCase ):
UpperCAmelCase ='bigscience/bloom-1b7'
# Constant values
UpperCAmelCase =2.109659552692574
UpperCAmelCase ='Hello my name is'
UpperCAmelCase =set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
UpperCAmelCase =1_0
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
# Models and tokenizer
_UpperCAmelCase : Tuple =AutoTokenizer.from_pretrained(self.model_name)
class __magic_name__ ( _UpperCamelCase ):
def lowerCAmelCase ( self) -> List[Any]:
'''simple docstring'''
super().setUp()
# Models and tokenizer
_UpperCAmelCase : Union[str, Any] =AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto')
_UpperCAmelCase : List[str] =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='auto')
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple =self.model_abit.config
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'quantization_config'))
_UpperCAmelCase : Union[str, Any] =config.to_dict()
_UpperCAmelCase : Union[str, Any] =config.to_diff_dict()
_UpperCAmelCase : Optional[Any] =config.to_json_string()
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
_UpperCAmelCase : Optional[Any] =self.model_fpaa.get_memory_footprint()
_UpperCAmelCase : Tuple =self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE)
_UpperCAmelCase : Optional[Any] =get_some_linear_layer(self.model_abit)
self.assertTrue(linear.weight.__class__ == Paramsabit)
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_SCREAMING_SNAKE_CASE , torch.nn.Linear):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta)
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Tuple =self.tokenizer(self.input_text , return_tensors='pt')
_UpperCAmelCase : int =self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=1_0)
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE) , self.EXPECTED_OUTPUTS)
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] =BitsAndBytesConfig()
_UpperCAmelCase : Dict =True
_UpperCAmelCase : Any =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_SCREAMING_SNAKE_CASE , device_map='auto')
_UpperCAmelCase : List[str] =self.tokenizer(self.input_text , return_tensors='pt')
_UpperCAmelCase : Optional[Any] =model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=1_0)
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE) , self.EXPECTED_OUTPUTS)
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
with self.assertRaises(_SCREAMING_SNAKE_CASE), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_SCREAMING_SNAKE_CASE)
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =BitsAndBytesConfig()
with self.assertRaises(_SCREAMING_SNAKE_CASE):
_UpperCAmelCase : int =AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_SCREAMING_SNAKE_CASE , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='auto' , bnb_abit_quant_type='nf4' , )
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
with self.assertRaises(_SCREAMING_SNAKE_CASE):
# Tries with `str`
self.model_abit.to('cpu')
with self.assertRaises(_SCREAMING_SNAKE_CASE):
            # Tries with a `dtype`
self.model_abit.to(torch.floataa)
with self.assertRaises(_SCREAMING_SNAKE_CASE):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0'))
with self.assertRaises(_SCREAMING_SNAKE_CASE):
            # Tries with `float()` casting
self.model_abit.float()
with self.assertRaises(_SCREAMING_SNAKE_CASE):
            # Tries with `half()` casting
self.model_abit.half()
# Test if we did not break anything
_UpperCAmelCase : Optional[int] =self.tokenizer(self.input_text , return_tensors='pt')
_UpperCAmelCase : List[str] =self.model_fpaa.to(torch.floataa)
_UpperCAmelCase : str =self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=1_0)
# Check this does not throw an error
_UpperCAmelCase : str =self.model_fpaa.to('cpu')
# Check this does not throw an error
_UpperCAmelCase : Tuple =self.model_fpaa.half()
# Check this does not throw an error
_UpperCAmelCase : Optional[Any] =self.model_fpaa.float()
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='auto')
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __magic_name__ ( unittest.TestCase ):
@classmethod
def lowerCAmelCase ( cls) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict ='t5-small'
_UpperCAmelCase : Dict ='google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
_UpperCAmelCase : List[Any] =AutoTokenizer.from_pretrained(cls.model_name)
_UpperCAmelCase : Tuple ='Translate in German: Hello, my dog is cute'
def lowerCAmelCase ( self) -> Any:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
from transformers import TaForConditionalGeneration
_UpperCAmelCase : Any =TaForConditionalGeneration._keep_in_fpaa_modules
_UpperCAmelCase : int =None
# test with `t5-small`
_UpperCAmelCase : str =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='auto')
_UpperCAmelCase : Tuple =self.tokenizer(self.input_text , return_tensors='pt').to(0)
_UpperCAmelCase : Optional[int] =model.generate(**_SCREAMING_SNAKE_CASE)
# test with `flan-t5-small`
_UpperCAmelCase : Optional[Any] =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='auto')
_UpperCAmelCase : int =self.tokenizer(self.input_text , return_tensors='pt').to(0)
_UpperCAmelCase : Dict =model.generate(**_SCREAMING_SNAKE_CASE)
_UpperCAmelCase : str =modules
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
_UpperCAmelCase : Any =TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='auto')
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit))
_UpperCAmelCase : Dict =self.tokenizer(self.input_text , return_tensors='pt').to(0)
_UpperCAmelCase : Union[str, Any] =model.generate(**_SCREAMING_SNAKE_CASE)
# test with `flan-t5-small`
_UpperCAmelCase : str =TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='auto')
_UpperCAmelCase : Union[str, Any] =self.tokenizer(self.input_text , return_tensors='pt').to(0)
_UpperCAmelCase : List[str] =model.generate(**_SCREAMING_SNAKE_CASE)
class __magic_name__ ( _UpperCamelCase ):
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
super().setUp()
# model_name
_UpperCAmelCase : str ='bigscience/bloom-560m'
_UpperCAmelCase : List[str] ='t5-small'
# Different types of model
_UpperCAmelCase : Optional[int] =AutoModel.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='auto')
# Sequence classification model
_UpperCAmelCase : List[Any] =AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='auto')
# CausalLM model
_UpperCAmelCase : str =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='auto')
# Seq2seq model
_UpperCAmelCase : str =AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='auto')
def lowerCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class __magic_name__ ( _UpperCamelCase ):
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
super().setUp()
def lowerCAmelCase ( self) -> List[Any]:
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Any =pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
_UpperCAmelCase : Tuple =self.pipe(self.input_text)
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class __magic_name__ ( _UpperCamelCase ):
def lowerCAmelCase ( self) -> List[str]:
'''simple docstring'''
super().setUp()
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Tuple =AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE , device_map='balanced')
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1})
# Check that inference pass works on the model
_UpperCAmelCase : int =self.tokenizer(self.input_text , return_tensors='pt')
# Second real batch
_UpperCAmelCase : Optional[Any] =model_parallel.generate(input_ids=encoded_input['input_ids'].to(0) , max_new_tokens=1_0)
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE) , self.EXPECTED_OUTPUTS)
class __magic_name__ ( _UpperCamelCase ):
def lowerCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCAmelCase : Tuple ='facebook/opt-350m'
super().setUp()
def lowerCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
if version.parse(importlib.metadata.version('bitsandbytes')) < version.parse('0.37.0'):
return
# Step 1: freeze all parameters
_UpperCAmelCase : int =AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_SCREAMING_SNAKE_CASE)
self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()})
for param in model.parameters():
_UpperCAmelCase : Dict =False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
_UpperCAmelCase : Union[str, Any] =param.data.to(torch.floataa)
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_SCREAMING_SNAKE_CASE)):
_UpperCAmelCase : Union[str, Any] =LoRALayer(module.q_proj , rank=1_6)
_UpperCAmelCase : List[str] =LoRALayer(module.k_proj , rank=1_6)
_UpperCAmelCase : Optional[Any] =LoRALayer(module.v_proj , rank=1_6)
# Step 3: dummy batch
_UpperCAmelCase : Dict =self.tokenizer('Test batch ' , return_tensors='pt').to(0)
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
_UpperCAmelCase : Tuple =model.forward(**_SCREAMING_SNAKE_CASE)
out.logits.norm().backward()
for module in model.modules():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
self.assertTrue(module.adapter[1].weight.grad is not None)
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
elif isinstance(_SCREAMING_SNAKE_CASE , nn.Embedding):
self.assertTrue(module.weight.grad is None)
class __magic_name__ ( _UpperCamelCase ):
UpperCAmelCase ='gpt2-xl'
UpperCAmelCase =3.3191854854152187
| 368
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowercase =logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class __magic_name__ ( lowerCAmelCase ):
def __init__( self , **snake_case) -> Optional[int]:
'''simple docstring'''
super().__init__(**snake_case)
requires_backends(self , 'vision')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self , snake_case , **snake_case) -> str:
'''simple docstring'''
return super().__call__(snake_case , **snake_case)
def lowerCAmelCase ( self , **snake_case) -> int:
'''simple docstring'''
_UpperCAmelCase : str ={}
if "candidate_labels" in kwargs:
_UpperCAmelCase : Union[str, Any] =kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
_UpperCAmelCase : List[Any] =kwargs['hypothesis_template']
return preprocess_params, {}, {}
def lowerCAmelCase ( self , snake_case , snake_case=None , snake_case="This is a photo of {}.") -> Any:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =load_image(snake_case)
_UpperCAmelCase : Union[str, Any] =self.image_processor(images=[image] , return_tensors=self.framework)
_UpperCAmelCase : Union[str, Any] =candidate_labels
        _UpperCAmelCase : List[Any] =[hypothesis_template.format(x) for x in candidate_labels]
_UpperCAmelCase : str =self.tokenizer(snake_case , return_tensors=self.framework , padding=snake_case)
_UpperCAmelCase : Any =[text_inputs]
return inputs
def lowerCAmelCase ( self , snake_case) -> str:
'''simple docstring'''
_UpperCAmelCase : List[str] =model_inputs.pop('candidate_labels')
_UpperCAmelCase : Tuple =model_inputs.pop('text_inputs')
        if isinstance(text_inputs[0] , UserDict):
_UpperCAmelCase : Any =text_inputs[0]
else:
# Batching case.
_UpperCAmelCase : str =text_inputs[0][0]
        _UpperCAmelCase : Any =self.model(**text_inputs , **model_inputs)
_UpperCAmelCase : List[str] ={
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def lowerCAmelCase ( self , snake_case) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str =model_outputs.pop('candidate_labels')
_UpperCAmelCase : Union[str, Any] =model_outputs['logits'][0]
if self.framework == "pt":
_UpperCAmelCase : Dict =logits.softmax(dim=-1).squeeze(-1)
_UpperCAmelCase : Union[str, Any] =probs.tolist()
if not isinstance(snake_case , snake_case):
_UpperCAmelCase : Union[str, Any] =[scores]
elif self.framework == "tf":
_UpperCAmelCase : Dict =stable_softmax(snake_case , axis=-1)
_UpperCAmelCase : str =probs.numpy().tolist()
else:
raise ValueError(f"Unsupported framework: {self.framework}")
_UpperCAmelCase : List[str] =[
{'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(snake_case , snake_case) , key=lambda x: -x[0])
]
return result
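# A hypothetical end-to-end usage sketch of the pipeline above via the public
# API (the checkpoint name is real, the image path is assumed):
from transformers import pipeline
classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
predictions = classifier(
    "cat.png",  # hypothetical local image
    candidate_labels=["cat", "dog"],
    hypothesis_template="This is a photo of {}.",
)
# -> [{"score": ..., "label": "cat"}, {"score": ..., "label": "dog"}], best first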
| 242
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A =logging.get_logger(__name__)
A ={
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class _a ( __snake_case ):
__a : str = "gpt_bigcode"
__a : str = ["past_key_values"]
__a : Any = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Optional[int] , lowercase : List[Any]=50_257 , lowercase : Optional[int]=1_024 , lowercase : Any=768 , lowercase : Tuple=12 , lowercase : Optional[int]=12 , lowercase : Tuple=None , lowercase : int="gelu_pytorch_tanh" , lowercase : Tuple=0.1 , lowercase : str=0.1 , lowercase : str=0.1 , lowercase : List[str]=1E-5 , lowercase : Any=0.02 , lowercase : Optional[Any]=True , lowercase : Dict=True , lowercase : List[Any]=50_256 , lowercase : Optional[int]=50_256 , lowercase : List[str]=True , lowercase : List[str]=True , lowercase : List[str]=True , **lowercase : Optional[Any] , ):
'''simple docstring'''
UpperCAmelCase = vocab_size
UpperCAmelCase = n_positions
UpperCAmelCase = n_embd
UpperCAmelCase = n_layer
UpperCAmelCase = n_head
UpperCAmelCase = n_inner
UpperCAmelCase = activation_function
UpperCAmelCase = resid_pdrop
UpperCAmelCase = embd_pdrop
UpperCAmelCase = attn_pdrop
UpperCAmelCase = layer_norm_epsilon
UpperCAmelCase = initializer_range
UpperCAmelCase = scale_attn_weights
UpperCAmelCase = use_cache
UpperCAmelCase = attention_softmax_in_fpaa
UpperCAmelCase = scale_attention_softmax_in_fpaa
UpperCAmelCase = multi_query
UpperCAmelCase = bos_token_id
UpperCAmelCase = eos_token_id
super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
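# A quick check of the attribute_map aliasing above: reading the generic
# `hidden_size` transparently returns the GPT-style `n_embd` value (tiny
# config values assumed):
from transformers import GPTBigCodeConfig
cfg = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
assert cfg.hidden_size == 128 and cfg.num_hidden_layers == 2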
| 34
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCamelCase_ = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCamelCase_ = {
'''allenai/led-base-16384''': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCamelCase_ ( ):
'''simple docstring'''
UpperCAmelCase_ : int = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
UpperCAmelCase_ : Dict = bs[:]
UpperCAmelCase_ : Any = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    UpperCAmelCase_ : Any = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def lowerCamelCase_ ( _a : List[str] ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = set()
UpperCAmelCase_ : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase_ : Optional[int] = char
return pairs
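# A one-line replica of the pair extraction above with a worked example; these
# adjacent symbol pairs are what the BPE loop below ranks and merges:
def _pairs(word):
    return set(zip(word, word[1:]))
assert _pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}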
class _snake_case ( __snake_case ):
'''simple docstring'''
A__ : str = VOCAB_FILES_NAMES
A__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
A__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self: Union[str, Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any]="replace" ,lowerCamelCase_: Optional[Any]="<s>" ,lowerCamelCase_: List[Any]="</s>" ,lowerCamelCase_: List[str]="</s>" ,lowerCamelCase_: int="<s>" ,lowerCamelCase_: int="<unk>" ,lowerCamelCase_: str="<pad>" ,lowerCamelCase_: Optional[Any]="<mask>" ,lowerCamelCase_: List[str]=False ,**lowerCamelCase_: Tuple ,) -> Any:
UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else bos_token
UpperCAmelCase_ : int = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else eos_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else sep_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else cls_token
UpperCAmelCase_ : Optional[Any] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else unk_token
UpperCAmelCase_ : List[str] = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_ : str = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token
super().__init__(
errors=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,**lowerCamelCase_ ,)
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as vocab_handle:
UpperCAmelCase_ : Union[str, Any] = json.load(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ : Any = errors # how to handle errors in decoding
UpperCAmelCase_ : int = bytes_to_unicode()
UpperCAmelCase_ : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as merges_handle:
UpperCAmelCase_ : Any = merges_handle.read().split("""\n""" )[1:-1]
UpperCAmelCase_ : int = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase_ : Union[str, Any] = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
UpperCAmelCase_ : Tuple = {}
UpperCAmelCase_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase_ : int = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ ( self: List[str] ) -> List[str]:
return len(self.encoder )
def A__ ( self: Any ) -> Union[str, Any]:
return dict(self.encoder ,**self.added_tokens_encoder )
def A__ ( self: Tuple ,lowerCamelCase_: Dict ) -> Optional[Any]:
if token in self.cache:
return self.cache[token]
UpperCAmelCase_ : Union[str, Any] = tuple(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = get_pairs(lowerCamelCase_ )
if not pairs:
return token
while True:
UpperCAmelCase_ : Union[str, Any] = min(lowerCamelCase_ ,key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase_ , UpperCAmelCase_ : Any = bigram
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : List[str] = 0
while i < len(lowerCamelCase_ ):
try:
UpperCAmelCase_ : str = word.index(lowerCamelCase_ ,lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase_ : Union[str, Any] = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase_ : List[str] = tuple(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
UpperCAmelCase_ : List[str] = get_pairs(lowerCamelCase_ )
UpperCAmelCase_ : int = """ """.join(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = word
return word
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Tuple ) -> List[str]:
UpperCAmelCase_ : str = []
for token in re.findall(self.pat ,lowerCamelCase_ ):
UpperCAmelCase_ : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) )
return bpe_tokens
def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ) -> Optional[int]:
return self.encoder.get(lowerCamelCase_ ,self.encoder.get(self.unk_token ) )
def A__ ( self: List[str] ,lowerCamelCase_: str ) -> Optional[Any]:
return self.decoder.get(lowerCamelCase_ )
def A__ ( self: List[str] ,lowerCamelCase_: List[str] ) -> List[Any]:
UpperCAmelCase_ : str = """""".join(lowerCamelCase_ )
UpperCAmelCase_ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def A__ ( self: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase_ : List[Any] = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase_ : List[str] = os.path.join(
lowerCamelCase_ ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCamelCase_ ,ensure_ascii=lowerCamelCase_ ) + """\n""" )
UpperCAmelCase_ : str = 0
with open(lowerCamelCase_ ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
UpperCAmelCase_ : Tuple = token_index
writer.write(""" """.join(lowerCamelCase_ ) + """\n""" )
index += 1
return vocab_file, merge_file
def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase_ : int = [self.cls_token_id]
UpperCAmelCase_ : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self: Union[str, Any] ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ,lowerCamelCase_: bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def A__ ( self: str ,lowerCamelCase_: List[int] ,lowerCamelCase_: Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase_ : Optional[Any] = [self.sep_token_id]
UpperCAmelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self: Optional[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str=False ,**lowerCamelCase_: List[str] ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
UpperCAmelCase_ : Dict = """ """ + text
return (text, kwargs)
def A__ ( self: List[str] ,lowerCamelCase_: Union[Dict[str, EncodedInput], BatchEncoding] ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase_: Optional[int] = None ,lowerCamelCase_: Optional[bool] = None ,) -> dict:
UpperCAmelCase_ : Optional[int] = super()._pad(
encoded_inputs=lowerCamelCase_ ,max_length=lowerCamelCase_ ,padding_strategy=lowerCamelCase_ ,pad_to_multiple_of=lowerCamelCase_ ,return_attention_mask=lowerCamelCase_ ,)
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase_ : str = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase_ : str = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
UpperCAmelCase_ : List[Any] = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ )
if needs_to_be_padded:
UpperCAmelCase_ : Dict = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase_ : str = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase_ : List[str] = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
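# A small worked example of the padding rule above (values assumed): right-side
# padding extends `global_attention_mask` with -1, since 0 already means
# "local attention" and must not be confused with padding:
mask = [1, 0, 0]  # 1 = global attention on the first token
difference = 2
assert mask + [-1] * difference == [1, 0, 0, -1, -1]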
| 345
| 0
|
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__lowerCAmelCase = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
__lowerCAmelCase = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
__lowerCAmelCase = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowerCAmelCase = '''facebook'''
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
__lowerCAmelCase = '''allenai'''
def snake_case_ ( snake_case ) -> Dict:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    lowercase__: Optional[Any] = dict((re.sub(R'@@$' , '' , k ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , k ), v) for k, v in d.items() )
lowercase__: Any = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f'{k}</w>']
lowercase__: Any = d[k] # restore
return da
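# A worked check of the dict-key rewrite described above, using a plain replica
# that skips the special-token restore step:
import re
def _rewrite(d):
    return {
        (re.sub(r"@@$", "", k) if k.endswith("@@") else k + "</w>"): v
        for k, v in d.items()
    }
assert _rewrite({"le@@": 5, "tt@@": 6, "er": 7}) == {"le": 5, "tt": 6, "er</w>": 7}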
def snake_case_ ( snake_case , snake_case ) -> Optional[Any]:
# prep
assert os.path.exists(snake_case )
os.makedirs(snake_case , exist_ok=snake_case )
print(f'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
lowercase__: Optional[Any] = basename(snake_case )
lowercase__: Union[str, Any] = dirname(snake_case )
lowercase__: Optional[int] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowercase__: List[Any] = cls.hub_models()
lowercase__: Union[str, Any] = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
lowercase__: Optional[Any] = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f'using checkpoint {checkpoint_file}' )
lowercase__: str = hub_utils.from_pretrained(
snake_case , snake_case , snake_case , archive_map=snake_case , **snake_case )
lowercase__: int = vars(chkpt['args']['model'] )
lowercase__: Optional[Any] = args['source_lang']
lowercase__: Dict = args['target_lang']
lowercase__: List[str] = dirname(snake_case )
lowercase__: Any = basename(snake_case )
# dicts
lowercase__: int = os.path.join(snake_case , f'dict.{src_lang}.txt' )
lowercase__: str = os.path.join(snake_case , f'dict.{tgt_lang}.txt' )
lowercase__: List[str] = Dictionary.load(snake_case )
lowercase__: Optional[int] = rewrite_dict_keys(src_dict.indices )
lowercase__: Optional[int] = len(snake_case )
lowercase__: Union[str, Any] = os.path.join(snake_case , 'vocab-src.json' )
print(f'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records' )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(snake_case , ensure_ascii=snake_case , indent=snake_case ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowercase__: Optional[Any] = True
for k in src_vocab.keys():
if not k.islower():
lowercase__: Tuple = False
break
lowercase__: List[Any] = Dictionary.load(snake_case )
lowercase__: Tuple = rewrite_dict_keys(tgt_dict.indices )
lowercase__: Dict = len(snake_case )
lowercase__: Optional[int] = os.path.join(snake_case , 'vocab-tgt.json' )
print(f'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records' )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(snake_case , ensure_ascii=snake_case , indent=snake_case ) )
# merges_file (bpecodes)
lowercase__: Union[str, Any] = os.path.join(snake_case , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowercase__: Tuple = os.path.join(snake_case , snake_case )
if os.path.exists(snake_case ):
break
with open(snake_case , encoding='utf-8' ) as fin:
lowercase__: Optional[int] = fin.read()
lowercase__: List[str] = re.sub(R' \d+$' , '' , snake_case , 0 , re.M ) # remove frequency number
print(f'Generating {merges_file}' )
with open(snake_case , 'w' , encoding='utf-8' ) as fout:
fout.write(snake_case )
# model config
lowercase__: Any = os.path.join(snake_case , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f'need to extend tokenizer to support bpe={args["bpe"]}'
assert args["tokenizer"] == "moses", f'need to extend tokenizer to support bpe={args["tokenizer"]}'
lowercase__: Optional[Any] = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.0_2,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
lowercase__: Union[str, Any] = 5
lowercase__: List[Any] = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowercase__: Tuple = best_score_hparams[model_dir]['length_penalty']
else:
lowercase__: Union[str, Any] = 1.0
print(f'Generating {fsmt_model_config_file}' )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(snake_case , ensure_ascii=snake_case , indent=snake_case ) )
# tokenizer config
lowercase__: Optional[int] = os.path.join(snake_case , snake_case )
lowercase__: Any = {
'langs': [src_lang, tgt_lang],
'model_max_length': 10_24,
'do_lower_case': do_lower_case,
}
print(f'Generating {fsmt_tokenizer_config_file}' )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(snake_case , ensure_ascii=snake_case , indent=snake_case ) )
# model
lowercase__: int = chkpt['models'][0]
lowercase__: Optional[int] = model.state_dict()
# rename keys to start with 'model.'
lowercase__: List[str] = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowercase__: int = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(snake_case , snake_case )
lowercase__: Dict = FSMTConfig.from_pretrained(snake_case )
lowercase__: Union[str, Any] = FSMTForConditionalGeneration(snake_case )
# check that it loads ok
model_new.load_state_dict(snake_case , strict=snake_case )
# save
lowercase__: Tuple = os.path.join(snake_case , snake_case )
print(f'Generating {pytorch_weights_dump_path}' )
torch.save(snake_case , snake_case )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f'cd {data_root}' )
print(f'transformers-cli upload {model_dir}' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__lowerCAmelCase = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 368
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = '''▁'''
__lowerCAmelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
__lowerCAmelCase = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
__lowerCAmelCase = {'''vinai/bartpho-syllable''': 10_24}
class __a ( __UpperCamelCase ):
__lowercase : int = VOCAB_FILES_NAMES
__lowercase : str = PRETRAINED_VOCAB_FILES_MAP
__lowercase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Optional[int] = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> None:
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
lowercase__: List[str] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
lowercase__: Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
lowercase__: Dict = vocab_file
lowercase__: str = monolingual_vocab_file
lowercase__: int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCAmelCase__ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowercase__: List[Any] = {}
lowercase__: Optional[int] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCAmelCase__ ) not in self.fairseq_tokens_to_ids:
lowercase__: str = cnt
cnt += 1
with open(lowerCAmelCase__ , 'r' , encoding='utf-8' ) as f:
for line in f.readlines():
lowercase__: Optional[Any] = line.strip().split()[0]
lowercase__: Optional[Any] = len(self.fairseq_tokens_to_ids )
if str(lowerCAmelCase__ ) not in self.fairseq_tokens_to_ids:
lowercase__: Optional[int] = len(self.fairseq_tokens_to_ids )
lowercase__: Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Optional[int]:
'''simple docstring'''
lowercase__: Tuple = self.__dict__.copy()
lowercase__: Tuple = None
lowercase__: Optional[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
lowercase__: Union[str, Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowercase__: Union[str, Any] = {}
lowercase__: List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__: Optional[int] = [self.cls_token_id]
lowercase__: Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b=None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
@property
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return len(self.fairseq_ids_to_tokens )
    def get_vocab( self ) -> Dict[str, int]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> int:
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def _convert_id_to_token( self , index ) -> str:
        '''simple docstring'''
        return self.fairseq_ids_to_tokens[index]
    def convert_tokens_to_string( self , tokens ) -> str:
        '''simple docstring'''
        # SPIECE_UNDERLINE is the SentencePiece word-boundary marker defined at the top of the module
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix=None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        out_monolingual_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            out_monolingual_vocab_file ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , out_monolingual_vocab_file )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(out_monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(F'{str(token )} \n' )
        return out_vocab_file, out_monolingual_vocab_file
| 288
| 0
|
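A quick sketch of the special-token layout the tokenizer above produces; `tok` stands in for a hypothetical loaded instance, and the plain ids are illustrative only:
# Single sequences are wrapped as <s> X </s>; pairs become <s> A </s></s> B </s>
single = tok.build_inputs_with_special_tokens([10, 11])
assert single == [tok.cls_token_id, 10, 11, tok.sep_token_id]
pair = tok.build_inputs_with_special_tokens([10, 11], [12, 13])
assert pair == [tok.cls_token_id, 10, 11, tok.sep_token_id, tok.sep_token_id, 12, 13, tok.sep_token_id]
# Token type ids are all zeros for this model, even for pairs
assert tok.create_token_type_ids_from_sequences([10, 11], [12, 13]) == [0] * 8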
'''simple docstring'''
def _A ( matrix ) -> int:
    rows = len(matrix )
    columns = len(matrix[0] )
    rank = min(rows , columns )
    for row in range(rank ):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows ):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 250
|
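Two spot checks for the elimination routine above; note it mutates its argument, so pass throwaway lists:
assert _A([[1.0, 0.0], [0.0, 1.0]]) == 2   # identity matrix has full rank
assert _A([[1.0, 2.0], [2.0, 4.0]]) == 1   # second row is a multiple of the first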
'''simple docstring'''
class a__ :
    def __init__( self , n ):
        """simple docstring"""
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0
    def __len__( self ):
        """simple docstring"""
        return self.size
    def is_empty( self ):
        """simple docstring"""
        return self.size == 0
    def first( self ):
        """simple docstring"""
        return False if self.is_empty() else self.array[self.front]
    def enqueue( self , data ):
        """simple docstring"""
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL" )
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self
    def dequeue( self ):
        """simple docstring"""
        if self.size == 0:
            raise Exception("UNDERFLOW" )
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
| 250
| 1
|
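Minimal usage of the fixed-size circular queue above; enqueue returns self, so calls can be chained:
q = a__(3)
q.enqueue(1).enqueue(2)
assert len(q) == 2 and q.first() == 1
assert q.dequeue() == 1
assert q.dequeue() == 2 and q.is_empty()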
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A_ ( metaclass=DummyObject ):
'''simple docstring'''
UpperCAmelCase_ : Dict = ["""torch""", """scipy"""]
def __init__( self : Any , *lowercase_ : Optional[int] , **lowercase_ : Any ) -> str:
requires_backends(self , ['torch', 'scipy'] )
@classmethod
def UpperCAmelCase_ ( cls : Optional[int] , *lowercase_ : int , **lowercase_ : Optional[int] ) -> Any:
requires_backends(cls , ['torch', 'scipy'] )
@classmethod
def UpperCAmelCase_ ( cls : Any , *lowercase_ : List[str] , **lowercase_ : Tuple ) -> List[Any]:
requires_backends(cls , ['torch', 'scipy'] )
| 280
|
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowercase__ = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys( s_dict ):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys() )
    for key in keys:
        layer_pattern = R'.*/layers_(\d+)'
        new_key = key
        if re.match(layer_pattern , key ):
            new_key = re.sub(R'layers_(\d+)' , R'block/\1/layer' , new_key )
        encoder_decoder_pattern = R'(encoder|decoder)\/'
        if re.match(encoder_decoder_pattern , key ):
            groups = re.match(encoder_decoder_pattern , new_key ).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R'/mlp/' , R'/1/mlp/' , new_key )
                new_key = re.sub(R'/pre_mlp_layer_norm/' , R'/1/layer_norm/' , new_key )
            elif groups[0] == "decoder":
                new_key = re.sub(R'/mlp/' , R'/2/mlp/' , new_key )
                new_key = re.sub(R'/pre_mlp_layer_norm/' , R'/2/layer_norm/' , new_key )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key , temp_key )
        print(F"""{key} -> {new_key}""" )
        s_dict[new_key] = s_dict.pop(key )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCAmelCase : Optional[int] = s_dict[
'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCAmelCase : Optional[int] = s_dict[
'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys() ):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts ):
                new_key = key.replace('expert/' , F"""experts/expert_{idx}/""" )
                s_dict[new_key] = expert_weights[idx]
                print(F"""{key} -> {new_key}""" )
            s_dict.pop(key )
    return s_dict
lowercase__ = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config( gin_file , num_experts ):
    # Convert a google style config to the hugging face format
    import regex as re
    with open(gin_file , 'r' ) as f:
        raw_gin = f.read()
    regex_match = re.findall(R'(.*) = ([0-9.]*)' , raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if '.' in value else int(value )
    activation = re.findall(R'(.*activations) = \(\'(.*)\',\)' , raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] )
    args['num_experts'] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_="./" , UpperCAmelCase_=8 ):
# Initialise PyTorch model
print(F"""Loading flax weights from : {flax_checkpoint_path}""" )
UpperCAmelCase : List[Any] = checkpoints.load_tax_checkpoint(UpperCAmelCase_ )
if gin_file is not None:
UpperCAmelCase : List[Any] = convert_gin_to_config(UpperCAmelCase_ , UpperCAmelCase_ )
else:
UpperCAmelCase : str = SwitchTransformersConfig.from_pretrained(UpperCAmelCase_ )
UpperCAmelCase : str = SwitchTransformersForConditionalGeneration(UpperCAmelCase_ )
UpperCAmelCase : str = flax_params['target']
UpperCAmelCase : Union[str, Any] = flatten_dict(UpperCAmelCase_ , sep='/' )
UpperCAmelCase : Tuple = rename_keys(UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = unflatten_dict(UpperCAmelCase_ , sep='/' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(UpperCAmelCase_ , UpperCAmelCase_ )
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
    help=(
        "Path to the T5X SwitchTransformers checkpoint to convert. \nIf no `config_name` is provided, a"
        " `gin_file` has to be provided."
    ),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
lowercase__ = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
    args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 280
| 1
|
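The layer renaming in the conversion script above is a plain regex substitution; a standalone illustration with a made-up checkpoint key:
import re
key = "encoder/layers_3/mlp/wi/kernel"
print(re.sub(r"layers_(\d+)", r"block/\1/layer", key))  # encoder/block/3/layer/mlp/wi/kernel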
def triangle_number_generator( ):
    '''simple docstring'''
    for n in range(1 , 1_0_0_0_0_0_0 ):
        yield n * (n + 1) // 2
def count_divisors( n ) -> int:
    '''simple docstring'''
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution( ) -> int:
    '''simple docstring'''
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 5_0_0 )
if __name__ == "__main__":
print(solution())
| 68
|
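The divisor count above comes from the prime factorisation: 28 = 2^2 * 7, so it has (2 + 1) * (1 + 1) = 6 divisors.
assert count_divisors(28) == 6            # 1, 2, 4, 7, 14, 28
assert count_divisors(76_576_500) == 576  # the first triangle number with more than 500 divisors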
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
a__: Optional[int] = logging.get_logger(__name__)
a__: int = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a__: Optional[Any] = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
a__: List[str] = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
a__: Optional[Any] = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class SCREAMING_SNAKE_CASE__ ( PreTrainedTokenizerFast ):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = BertTokenizer
    def __init__( self,vocab_file=None,tokenizer_file=None,do_lower_case=True,unk_token="[UNK]",sep_token="[SEP]",pad_token="[PAD]",cls_token="[CLS]",mask_token="[MASK]",tokenize_chinese_chars=True,strip_accents=None,**kwargs,):
        super().__init__(
            vocab_file,tokenizer_file=tokenizer_file,do_lower_case=do_lower_case,unk_token=unk_token,sep_token=sep_token,pad_token=pad_token,cls_token=cls_token,mask_token=mask_token,tokenize_chinese_chars=tokenize_chinese_chars,strip_accents=strip_accents,**kwargs,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''',do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''',strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''',tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers,normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self,token_ids_a,token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self,token_ids_a,token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self,save_directory,filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory,name=filename_prefix )
        return tuple(files )
| 193
| 0
|
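The segment-id layout computed above, written out by hand with illustrative token ids:
cls, sep = [101], [102]
seq_a, seq_b = [7, 8, 9], [5, 6]
token_type_ids = len(cls + seq_a + sep) * [0] + len(seq_b + sep) * [1]
print(token_type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]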
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]
def try_key( ciphertext , key ) -> str | None:
    '''simple docstring'''
    decoded = ''
    for keychar, cipherchar in zip(cycle(key ) , ciphertext ):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar )
    return decoded
def filter_valid_chars( ciphertext ) -> list[str]:
    '''simple docstring'''
    possibles = []
    for key in product(LOWERCASE_INTS , repeat=3 ):
        decoded = try_key(ciphertext , key )
        if decoded is not None:
            possibles.append(decoded )
    return possibles
def filter_common_word( possibles , common_word ) -> list[str]:
    '''simple docstring'''
    return [possible for possible in possibles if common_word in possible.lower()]
def A ( __UpperCAmelCase = "p059_cipher.txt" ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = 42
UpperCAmelCase_ = Path(__UpperCAmelCase ).parent.joinpath(__UpperCAmelCase ).read_text(encoding='''utf-8''' )
UpperCAmelCase_ = [int(__UpperCAmelCase ) for number in data.strip().split(''',''' )]
UpperCAmelCase_ = filter_valid_chars(__UpperCAmelCase )
for common_word in COMMON_WORDS:
UpperCAmelCase_ = filter_common_word(__UpperCAmelCase , __UpperCAmelCase )
if len(__UpperCAmelCase ) == 1:
break
UpperCAmelCase_ = possibles[0]
return sum(ord(__UpperCAmelCase ) for char in decoded_text )
if __name__ == "__main__":
print(f"{solution() = }")
| 362
|
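The key search above relies on XOR with a repeating key being its own inverse; a standalone round trip:
from itertools import cycle
plain = [ord(c) for c in "the quick brown fox"]
key = [ord(c) for c in "god"]
cipher = [p ^ k for p, k in zip(plain, cycle(key))]
assert "".join(chr(c ^ k) for c, k in zip(cipher, cycle(key))) == "the quick brown fox"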
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module ):
    def __init__( self ):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4 , 5)
    def forward( self , x ):
        return self.linear2(self.batchnorm(self.linear1(x )))
class PreForwardHook(ModelHook ):
    def pre_forward( self , module , *args , **kwargs ):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook ):
    def post_forward( self , module , output ):
        return output + 1
class a_ ( unittest.TestCase ):
def __a ( self :str) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
self.assertEqual(test_model._hf_hook , _lowercase)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[Any]) -> Any:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_lowercase , _lowercase)
        add_hook_to_module(_lowercase , _lowercase , append=True)
        self.assertEqual(isinstance(test_model._hf_hook , SequentialHook) , True)
self.assertEqual(len(test_model._hf_hook.hooks) , 2)
self.assertTrue(hasattr(_lowercase , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_lowercase)
self.assertFalse(hasattr(_lowercase , '''_hf_hook'''))
self.assertFalse(hasattr(_lowercase , '''_old_forward'''))
def __a ( self :Optional[int]) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(x + 1)
UpperCAmelCase_ = test_model(x + 2)
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# Attaching a hook to a model when it already has one replaces, does not chain
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PreForwardHook() , PreForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , _lowercase , atol=1E-5)
def __a ( self :List[str]) -> int:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# Attaching a hook to a model when it already has one replaces, does not chain
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1 , atol=1E-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PostForwardHook() , PostForwardHook())
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
assert torch.allclose(_lowercase , output + 2 , atol=1E-5)
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_lowercase)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_lowercase , _lowercase)
UpperCAmelCase_ = test_model(_lowercase)
self.assertTrue(torch.allclose(_lowercase , output + 1))
self.assertTrue(outputa.requires_grad)
        test_model._hf_hook.no_grad = True
UpperCAmelCase_ = test_model(_lowercase)
self.assertFalse(outputa.requires_grad)
@require_multi_gpu
def __a ( self :Tuple) -> Optional[int]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device('''cpu'''))
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
        self.assertEqual(model.linear2.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
        add_hook_to_module(model.linear1 , AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2 , AlignDevicesHook(execution_device=1))
        self.assertEqual(model.linear1.weight.device , torch.device(0))
        self.assertEqual(model.batchnorm.weight.device , torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device , torch.device(0))
        self.assertEqual(model.linear2.weight.device , torch.device(1))
# We can still make a forward pass. The input does not need to be on any particular device
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(1))
# We can add a general hook to put back output on same device as input.
add_hook_to_module(_lowercase , AlignDevicesHook(io_same_device=_lowercase))
UpperCAmelCase_ = torch.randn(2 , 3).to(0)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , torch.device(0))
def __a ( self :str) -> List[Any]:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device('''cpu'''))
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
        self.assertEqual(model.linear2.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
        hook_kwargs = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
        add_hook_to_module(model.linear1 , AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2 , AlignDevicesHook(**hook_kwargs))
# Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device , torch.device('''meta'''))
        self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
        self.assertEqual(model.linear2.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(hook_kwargs['''execution_device'''])
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device , torch.device('''cpu'''))
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
        self.assertEqual(model.linear2.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
        hook_kwargs = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
        add_hook_to_module(model.linear1 , AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2 , AlignDevicesHook(**hook_kwargs))
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device , torch.device('''meta'''))
        self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
        self.assertEqual(model.linear2.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device , torch.device('''cpu'''))
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
        self.assertEqual(model.linear2.weight.device , torch.device('''cpu'''))
def __a ( self :List[Any]) -> str:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device('''cpu'''))
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
        self.assertEqual(model.linear2.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase)
# Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device , torch.device('''meta'''))
        self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
        self.assertEqual(model.linear2.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
        self.assertEqual(model.linear1.weight.device , torch.device('''cpu'''))
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
        self.assertEqual(model.linear2.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(_lowercase , execution_device=_lowercase , offload=_lowercase , offload_buffers=_lowercase)
# Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device , torch.device('''meta'''))
        self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
        self.assertEqual(model.linear2.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
        self.assertEqual(model.linear1.weight.device , torch.device('''cpu'''))
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
        self.assertEqual(model.linear2.weight.device , torch.device('''cpu'''))
def __a ( self :Optional[Any]) -> int:
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
        self.assertEqual(model.linear1.weight.device , torch.device('''cpu'''))
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
        self.assertEqual(model.linear2.weight.device , torch.device('''cpu'''))
# This will move each submodule on different devices
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict())
# Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device , torch.device('''meta'''))
        self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
        self.assertEqual(model.linear2.weight.device , torch.device('''meta'''))
# Buffers are not included in the offload by default, so are on the execution device
UpperCAmelCase_ = torch.device(_lowercase)
self.assertEqual(model.batchnorm.running_mean.device , _lowercase)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
        self.assertEqual(model.linear1.weight.device , torch.device('''cpu'''))
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
        self.assertEqual(model.linear2.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(
_lowercase , execution_device=_lowercase , offload=_lowercase , weights_map=model.state_dict() , offload_buffers=_lowercase , )
# Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device , torch.device('''meta'''))
        self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
        self.assertEqual(model.linear2.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_lowercase)
self.assertEqual(output.device , _lowercase)
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(_lowercase)
        self.assertEqual(model.linear1.weight.device , torch.device('''cpu'''))
        self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
        self.assertEqual(model.linear2.weight.device , torch.device('''cpu'''))
| 344
| 0
|
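A condensed sketch of the hook mechanism the tests above exercise, using only the public accelerate API:
import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module
class AddOneHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs  # shift the input before the wrapped forward
layer = nn.Linear(3, 3)
x = torch.randn(2, 3)
add_hook_to_module(layer, AddOneHook())
hooked_out = layer(x)            # the hook adds 1 to the input first
remove_hook_from_module(layer)
assert torch.allclose(hooked_out, layer(x + 1))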
from ...processing_utils import ProcessorMixin
class _lowerCamelCase( ProcessorMixin ):
lowercase_ : List[Any] = """WhisperFeatureExtractor"""
lowercase_ : List[str] = """WhisperTokenizer"""
    def __init__( self, feature_extractor, tokenizer) -> Dict:
        """simple docstring"""
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids( self, task=None, language=None, no_timestamps=True) -> List[str]:
        """simple docstring"""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
    def __call__( self, *args, **kwargs) -> Dict:
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs
    def batch_decode( self, *args, **kwargs) -> Optional[int]:
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode( self, *args, **kwargs) -> Tuple:
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)
    def get_prompt_ids( self, text, return_tensors="np") -> str:
        """simple docstring"""
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
| 21
|
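A hedged usage sketch for the processor above; the checkpoint name is an illustrative choice, not a requirement:
import numpy as np
from transformers import WhisperProcessor
processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
batch = processor(audio=waveform, sampling_rate=16_000, text="hello world", return_tensors="np")
print(batch["input_features"].shape)  # log-mel features from the feature extractor
print(batch["labels"][0][:4])         # token ids from the tokenizer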
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class _lowerCamelCase( object ):
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=7, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=True, lowerCamelCase=99, lowerCamelCase=32, lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=64, lowerCamelCase="gelu", lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=5_12, lowerCamelCase=16, lowerCamelCase=2, lowerCamelCase=0.0_2, lowerCamelCase=3, lowerCamelCase=4, lowerCamelCase=None, lowerCamelCase=2, lowerCamelCase=2, lowerCamelCase=2, lowerCamelCase=2, lowerCamelCase=4, lowerCamelCase=1, ) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Dict = parent
_lowercase : Optional[Any] = batch_size
_lowercase : Any = seq_length
_lowercase : Optional[Any] = is_training
_lowercase : Optional[Any] = use_input_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : List[str] = use_labels
_lowercase : str = vocab_size
_lowercase : List[str] = hidden_size
_lowercase : Dict = num_hidden_layers
_lowercase : List[str] = num_attention_heads
_lowercase : int = intermediate_size
_lowercase : Union[str, Any] = hidden_act
_lowercase : int = hidden_dropout_prob
_lowercase : List[Any] = attention_probs_dropout_prob
_lowercase : Dict = max_position_embeddings
_lowercase : Union[str, Any] = type_vocab_size
_lowercase : List[Any] = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : List[str] = num_labels
_lowercase : Any = num_choices
_lowercase : Tuple = scope
_lowercase : Optional[Any] = q_groups
_lowercase : List[str] = k_groups
_lowercase : Optional[int] = v_groups
_lowercase : List[str] = post_attention_groups
_lowercase : Union[str, Any] = intermediate_groups
_lowercase : int = output_groups
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : int = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowercase : Any = None
if self.use_input_mask:
_lowercase : Tuple = random_attention_mask([self.batch_size, self.seq_length])
_lowercase : Dict = None
_lowercase : int = None
_lowercase : List[Any] = None
if self.use_labels:
_lowercase : List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowercase : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowercase : Dict = ids_tensor([self.batch_size], self.num_choices)
_lowercase : Optional[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : List[str] = SqueezeBertModel(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Dict = model(lowerCamelCase, lowerCamelCase)
_lowercase : Any = model(lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> int:
"""simple docstring"""
_lowercase : Dict = SqueezeBertForMaskedLM(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = SqueezeBertForQuestionAnswering(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : List[Any] = model(
lowerCamelCase, attention_mask=lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> str:
"""simple docstring"""
_lowercase : Optional[Any] = self.num_labels
_lowercase : int = SqueezeBertForSequenceClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Any = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = self.num_labels
_lowercase : List[str] = SqueezeBertForTokenClassification(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Union[str, Any] = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : str = self.num_choices
_lowercase : str = SqueezeBertForMultipleChoice(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Dict = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : int = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : Optional[Any] = model(
lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Optional[int] = self.prepare_config_and_inputs()
((_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase)) : Dict = config_and_inputs
_lowercase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCamelCase( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
lowercase_ : Union[str, Any] = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowercase_ : Optional[int] = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ : Tuple = False
lowercase_ : List[str] = True
lowercase_ : int = False
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : str = SqueezeBertModelTester(self)
_lowercase : Dict = ConfigTester(self, config_class=lowerCamelCase, dim=37)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowerCamelCase)
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowerCamelCase)
@slow
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : List[Any] = SqueezeBertModel.from_pretrained(lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
@require_sentencepiece
@require_tokenizers
@require_torch
class _lowerCamelCase( unittest.TestCase ):
@slow
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
        model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli')
        input_ids = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1E-4))
| 21
| 1
|
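The multiple-choice path above tiles each input across the choice dimension; the tensor manipulation in isolation:
import torch
input_ids = torch.tensor([[7, 8, 9]])  # (batch, seq_len)
num_choices = 4
expanded = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
print(expanded.shape)  # torch.Size([1, 4, 3])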
def is_ip_va_address_valid( ip_va_address : str ) -> bool:
    '''simple docstring'''
    octets = [int(i ) for i in ip_va_address.split('.' ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= octet <= 254 for octet in octets )
if __name__ == "__main__":
lowercase_ = input().strip()
lowercase_ = """valid""" if is_ip_va_address_valid(ip) else """invalid"""
print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
| 351
|
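Spot checks for the validator above (it follows its source in capping octets at 254 rather than 255):
assert is_ip_va_address_valid("192.168.0.23") is True
assert is_ip_va_address_valid("192.255.15.8") is False  # octet above 254
assert is_ip_va_address_valid("172.100.0") is False     # only three octets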
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class A ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = VideoToVideoSDPipeline
lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'} ) - {'image', 'width', 'height'}
lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'} ) - {'image'}
lowerCamelCase = PipelineTesterMixin.required_optional_params - {'latents'}
lowerCamelCase = False
# No `output_type`.
lowerCamelCase = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def snake_case__ ( self : Tuple )-> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
        A__ = UNet3DConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4),layers_per_block=2,sample_size=3_2,in_channels=4,out_channels=4,down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'),up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'),cross_attention_dim=3_2,attention_head_dim=4,)
A__ = DDIMScheduler(
beta_start=0.00_085,beta_end=0.012,beta_schedule='scaled_linear',clip_sample=lowercase_,set_alpha_to_one=lowercase_,)
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[3_2, 6_4],in_channels=3,out_channels=3,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],latent_channels=4,sample_size=1_2_8,)
torch.manual_seed(0 )
A__ = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=3_2,intermediate_size=3_7,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1_0_0_0,hidden_act='gelu',projection_dim=5_1_2,)
A__ = CLIPTextModel(lowercase_ )
A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def snake_case__ ( self : Optional[Any],lowercase_ : Optional[int],lowercase_ : List[Any]=0 )-> Any:
'''simple docstring'''
A__ = floats_tensor((1, 3, 3, 3_2, 3_2),rng=random.Random(lowercase_ ) ).to(lowercase_ )
if str(lowercase_ ).startswith('mps' ):
A__ = torch.manual_seed(lowercase_ )
else:
A__ = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
A__ = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def snake_case__ ( self : List[Any] )-> List[Any]:
'''simple docstring'''
A__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
A__ = self.get_dummy_components()
A__ = VideoToVideoSDPipeline(**lowercase_ )
A__ = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
A__ = self.get_dummy_inputs(lowercase_ )
A__ = 'np'
A__ = sd_pipe(**lowercase_ ).frames
A__ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (3_2, 3_2, 3)
A__ = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available(),reason='XFormers attention is only available with CUDA and `xformers` installed',)
def snake_case__ ( self : Optional[Any] )-> int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase_,expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case__ ( self : Any )-> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def snake_case__ ( self : int )-> int:
'''simple docstring'''
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def snake_case__ ( self : List[Any] )-> List[str]:
'''simple docstring'''
pass
def snake_case__ ( self : Optional[int] )-> Optional[Any]:
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class A ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : List[str] )-> Dict:
'''simple docstring'''
        A__ = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL',torch_dtype=torch.float16 )
pipe.enable_model_cpu_offload()
# 10 frames
A__ = torch.Generator(device='cpu' ).manual_seed(0 )
A__ = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6),generator=lowercase_ )
A__ = video.to('cuda' )
A__ = 'Spiderman is surfing'
A__ = pipe(lowercase_,video=lowercase_,generator=lowercase_,num_inference_steps=3,output_type='pt' ).frames
A__ = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 282
| 0
|
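The pipeline tests above pin randomness through an explicit torch.Generator; the pattern on its own:
import torch
g1 = torch.Generator(device="cpu").manual_seed(0)
g2 = torch.Generator(device="cpu").manual_seed(0)
assert torch.equal(torch.randn(2, 3, generator=g1), torch.randn(2, 3, generator=g2))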
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class lowerCamelCase ( PretrainedConfig ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = '''falcon'''
SCREAMING_SNAKE_CASE = ['''past_key_values''']
    def __init__(self , vocab_size=65024 , hidden_size=4544 , num_hidden_layers=32 , num_attention_heads=71 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , hidden_dropout=0.0 , attention_dropout=0.0 , num_kv_heads=None , alibi=False , new_decoder_architecture=False , multi_query=True , parallel_attn=True , bias=False , bos_token_id=11 , eos_token_id=11 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("""n_embed""" , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
@property
def _a (self ):
"""simple docstring"""
return self.hidden_size // self.num_attention_heads
@property
def _a (self ):
"""simple docstring"""
return not self.alibi
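# Illustrative usage (not part of the original file): the derived properties
# follow directly from the stored hyperparameters.
#   config = FalconConfig(hidden_size=1024, num_attention_heads=16)
#   config.head_dim  # -> 64
#   config.rotary    # -> True, since `alibi` defaults to False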
| 171
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
| 70
| 0
|
"""simple docstring"""
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using Heap's iterative algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
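# Worked example (added for illustration): heaps([1, 2, 3]) returns all 3! = 6
# orderings, each produced from the previous one by a single swap:
# (1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)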
| 73
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 73
| 1
|
'''simple docstring'''
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count case-insensitive occurrences of `term` in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (documents containing `term`, total documents), one document per line."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return log10(n / df), rounded to 3 places, with optional add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Combine a term-frequency and an inverse-document-frequency score."""
    return round(tf * idf, 3)
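# Worked example (added for illustration): in a corpus of n = 10 documents where a
# term appears in df = 2 of them, inverse_document_frequency(2, 10) returns
# round(log10(10 / 2), 3) = 0.699; if the term occurs 3 times in one document,
# tf_idf(3, 0.699) = 2.097.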
| 22
|
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers (mirrors fairseq's Dictionary)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 17
| 0
|
"""simple docstring"""
import os
def solution():
    """Find the greatest product of four adjacent numbers (right, down, or on
    either diagonal) in the 20x20 grid read from grid.txt."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 161
|
"""simple docstring"""
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Simulate a quantum half adder: sum = bit0 XOR bit1, carry = bit0 AND bit1."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
| 161
| 1
|
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes


@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 109
|
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None):  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None):
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main():  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.left.right.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
main()
| 109
| 1
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        """A stand-in so type hints don't fail when vision isn't available."""

        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 371
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 287
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93
|
'''simple docstring'''
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index in `s` where `pattern` begins."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
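# Worked example (added for illustration): in "ABAAABCDBBABCDDEBCABC" the pattern
# "ABC" begins at indices 4, 10 and 18, so the print above outputs [4, 10, 18].
# The scan is O(len(s) * len(pattern)) comparisons in the worst case.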
| 93
| 1
|
'''simple docstring'''
def bfs(graph, s, t, parent) -> bool:
    """Return True if the residual graph still has an augmenting path from s to t."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink) -> int:
    """Compute the maximum flow from source to sink, augmenting along BFS paths."""
    # This array is filled by bfs and used to store the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity on the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
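# Note (added for context): choosing augmenting paths with BFS makes this the
# Edmonds-Karp variant of Ford-Fulkerson. For the graph above (the classic CLRS
# example network) the printed maximum flow is 23.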
| 362
|
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], xa: float, xb: float) -> float:
    """Approximate a root of `function` with the secant method, seeded at xa and xb."""
    x_n = xa
    x_n1 = xb
    while True:
        if x_n == x_n1 or function(x_n) == function(x_n1):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
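# Sanity check (computed independently of this script): the real root of
# x^3 - 2x - 5 is approximately 2.0945515, which is the value that
# intersection(f, 3, 3.5) converges to within the 1e-5 tolerance.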
| 72
| 0
|
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    """Collect pairs of context and information gain (X, IG(X)) for training the secondary learner."""
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)

    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    """Train the secondary learner on the collected (context, IG) pairs."""
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    """Fine-tune the language model, optionally filtering batches with the secondary learner."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()

    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)

    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context[0, :] = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.

                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)

                    print("Test perplexity, step", global_step, ":", real_perp)

            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpt2,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main()
| 86
|
"""simple docstring"""
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the fewest perfect squares that sum to `number` via dynamic programming."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
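# Worked example (added for illustration):
# minimum_squares_to_represent_a_number(13) == 2, since 13 = 2**2 + 3**2.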
| 86
| 1
|
"""simple docstring"""
def stooge_sort(arr: list) -> list:
    """Sort `arr` in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return

    # If first element is smaller than the last then swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (int)((h - i + 1) / 3)

        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))

        # Recursively sort last 2/3 elements
        stooge(arr, i + t, (h))

        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
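# Complexity note (standard result, added for context): stooge sort runs in
# O(n^(log 3 / log 1.5)) ~= O(n^2.71) comparisons, so it is a teaching example
# rather than a practical sorting algorithm.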
| 370
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the timm model's weights into our DeiT structure."""
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 296
| 0
|
from __future__ import annotations
def shear_stress(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    """Solve for whichever of stress, tangential force, or area is given as 0."""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
def solution(n: int = 100) -> int:
    """
    Returns the difference between the square of the sum of the first n natural
    numbers and the sum of their squares.

    >>> solution(10)
    2640
    """
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
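    # Worked check (added for illustration): the sum of the squares of 1..10 is
    # 385, the square of their sum is 55**2 = 3025, and 3025 - 385 = 2640.
    assert solution(10) == 2640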
def longest_common_substring(text1: str, text2: str) -> str:
    """
    Finds the longest common substring of two strings with dynamic programming:
    dp[i][j] is the length of the common suffix of text1[:i] and text2[:j].

    >>> longest_common_substring("abcdef", "xabded")
    'ab'
    """
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
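    # Illustrative check (added; not in the original module): "ab" is the longest
    # substring shared by "abcdef" and "xabded".
    assert longest_common_substring("abcdef", "xabded") == "ab"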
"""simple docstring"""
from typing import Any
def lowercase__ ( lowercase_ ) -> list[Any]:
"""simple docstring"""
if not input_list:
return []
_UpperCamelCase : Dict = [input_list.count(lowercase_ ) for value in input_list]
_UpperCamelCase : Union[str, Any] = max(lowercase_ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(lowercase_ ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
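    # Illustrative check (added): 2 and 3 are tied for the highest count.
    assert mode([1, 2, 2, 3, 3]) == [2, 3]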
from collections import defaultdict
from typing import Optional

from ..image_utils import load_image
from ..utils import (
    add_end_docstrings,
    is_torch_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """
    Automatic mask generation pipeline: points are sampled on a grid over
    (possibly cropped) versions of the image, masks are predicted per batch of
    points, and the results are filtered and deduplicated with NMS.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCamelCase : List[Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
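# Hedged usage sketch (added; all paths are placeholders):
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./model.pkl \
#       --config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin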
"""Official evaluation script for SQuAD version 2.0.

If an additional na_prob.json file is provided, which maps question IDs to the
model's predicted probability that a question is unanswerable, additional
statistics are computed and precision-recall curves can be plotted.
"""
import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np


ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
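# Hedged usage sketch (added; file names are placeholders):
#
#   python evaluate_squad_v2.py dev-v2.0.json predictions.json \
#       --na-prob-file na_prob.json --out-file eval.json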
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
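# To run just this suite (added note; the test file path is an assumption based
# on the usual diffusers layout):
#
#   python -m pytest tests/schedulers/test_scheduler_consistency_model.py -q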
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.eager import context
    from tensorflow.python.framework import ops

    from transformers import GradientAccumulator, create_optimizer


@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
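# Note on the expected values above (added comment): the accumulator sums
# gradients elementwise, so [1.0, 2.0] + [-2.0, 1.0] + [-1.0, 2.0] = [-2.0, 5.0]
# in the first test, and in the distributed test each replica's slice is summed
# the same way ([1, 3, -2] -> 2 and [2, -1, 2] -> 3 on replica 0) before
# `apply_grad` applies the accumulated gradient once.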
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowercase__ = get_logger(__name__)
class lowerCAmelCase__ :
'''simple docstring'''
lowerCamelCase__ = """dummy_data"""
lowerCamelCase__ = """datasets"""
lowerCamelCase__ = False
def __init__( self , lowercase , lowercase , lowercase , lowercase = None , lowercase = False , lowercase = True , lowercase = None , ):
_lowerCamelCase : Optional[Any] = 0
_lowerCamelCase : Dict = dataset_name
_lowerCamelCase : Union[str, Any] = cache_dir
_lowerCamelCase : Dict = use_local_dummy_data
_lowerCamelCase : Tuple = config
# download_callbacks take a single url as input
_lowerCamelCase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
_lowerCamelCase : Any = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
_lowerCamelCase : str = str(lowercase )
# to be downloaded
_lowerCamelCase : Union[str, Any] = None
_lowerCamelCase : int = None
@property
def A_ ( self ):
if self._dummy_file is None:
_lowerCamelCase : Tuple = self.download_dummy_data()
return self._dummy_file
@property
def A_ ( self ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('dummy' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('dummy' , self.version_name )
@property
def A_ ( self ):
return os.path.join(self.dummy_data_folder , 'dummy_data.zip' )
def A_ ( self ):
_lowerCamelCase : List[str] = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
_lowerCamelCase : int = cached_path(
lowercase , cache_dir=self.cache_dir , extract_compressed_file=lowercase , force_extract=lowercase )
return os.path.join(lowercase , self.dummy_file_name )
@property
def A_ ( self ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def A_ ( self ):
if self._bucket_url is None:
_lowerCamelCase : List[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) )
return self._bucket_url
@property
def A_ ( self ):
# return full path if its a dir
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] )
def A_ ( self , lowercase , *lowercase ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
_lowerCamelCase : Union[str, Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
_lowerCamelCase : Union[str, Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(lowercase , lowercase ):
return self.create_dummy_data_dict(lowercase , lowercase )
elif isinstance(lowercase , (list, tuple) ):
return self.create_dummy_data_list(lowercase , lowercase )
else:
return self.create_dummy_data_single(lowercase , lowercase )
def A_ ( self , lowercase , *lowercase ):
return self.download_and_extract(lowercase )
def A_ ( self , lowercase , lowercase ):
return self.download_and_extract(lowercase )
def A_ ( self , lowercase , *lowercase , **lowercase ):
return path
def A_ ( self ):
return {}
def A_ ( self , lowercase , lowercase ):
_lowerCamelCase : Optional[int] = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(lowercase , lowercase ):
for single_url in single_urls:
download_callback(lowercase )
else:
_lowerCamelCase : List[Any] = single_urls
download_callback(lowercase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(lowercase , lowercase ):
_lowerCamelCase : List[Any] = [os.path.join(lowercase , urllib.parse.quote_plus(Path(lowercase ).name ) ) for x in single_urls]
else:
_lowerCamelCase : Optional[int] = single_urls
_lowerCamelCase : List[Any] = os.path.join(lowercase , urllib.parse.quote_plus(Path(lowercase ).name ) )
_lowerCamelCase : int = value
# make sure that values are unique
if all(isinstance(lowercase , lowercase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
_lowerCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def A_ ( self , lowercase , lowercase ):
_lowerCamelCase : Optional[Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
_lowerCamelCase : List[str] = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , lowercase ) ) for url in data_url )
_lowerCamelCase : int = all(
url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
_lowerCamelCase : List[str] = [data_url[0]] * len(lowercase )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(lowercase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase : str = os.path.join(lowercase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) )
dummy_data_list.append(lowercase )
return dummy_data_list
def A_ ( self , lowercase , lowercase ):
for download_callback in self.download_callbacks:
download_callback(lowercase )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
_lowerCamelCase : Tuple = os.path.join(lowercase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) )
if os.path.exists(lowercase ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def A_ ( self ):
pass
def A_ ( self ):
pass
def A_ ( self , lowercase ):
def _iter_archive_members(lowercase ):
# this preserves the order of the members inside the ZIP archive
_lowerCamelCase : str = Path(self.dummy_file ).parent
_lowerCamelCase : Union[str, Any] = path.relative_to(lowercase )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
_lowerCamelCase : List[str] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(lowercase )
_lowerCamelCase : Optional[int] = Path(lowercase )
_lowerCamelCase : Dict = _iter_archive_members(lowercase ) if self.use_local_dummy_data else path.rglob('*' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('.', '__') ):
yield file_path.relative_to(lowercase ).as_posix(), file_path.open('rb' )
def A_ ( self , lowercase ):
if not isinstance(lowercase , lowercase ):
_lowerCamelCase : List[str] = [paths]
for path in paths:
if os.path.isfile(lowercase ):
if os.path.basename(lowercase ).startswith(('.', '__') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(lowercase ):
if os.path.basename(lowercase ).startswith(('.', '__') ):
continue
dirnames.sort()
for filename in sorted(lowercase ):
if filename.startswith(('.', '__') ):
continue
yield os.path.join(lowercase , lowercase )
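# Hedged usage sketch (added; dataset name, version, and URL are illustrative):
# serve a local dummy_data.zip for a dataset script under ./datasets/my_dataset/
# during tests.
#
#   dl_manager = MockDownloadManager(
#       "my_dataset", config=None, version="1.0.0", use_local_dummy_data=True
#   )
#   files = dl_manager.download_and_extract({"train": "https://example.com/train.csv"})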
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification


def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
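# Hedged usage sketch (added; the output directory is a placeholder):
#
#   python convert_swinv2_timm_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2-tiny-patch4-window8-256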
def fibonacci(n: int) -> int:
    """Returns the n-th term of the Fibonacci sequence (0, 1, 1, 2, 3, ...)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Returns the index of the first Fibonacci term with n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    """
    Returns the index of the first term in the Fibonacci sequence to contain
    n digits.

    >>> solution(3)
    12
    """
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
"""simple docstring"""
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet is > 2GB, so the weights need to be split
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(
                    1,
                    clip_num_channels,
                    clip_image_size,
                    clip_image_size,
                ).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )

    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=1_4,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
_UpperCAmelCase = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
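# Hedged usage sketch (added; model id and output directory are placeholders):
#
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./sd-onnx --opset 14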
"""simple docstring"""
from __future__ import annotations
import numpy as np
def __magic_name__ ( lowercase ):
return np.maximum(0 , lowercase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs )["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        # Skip the text-generation pipeline tests for this model.
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False
    def setUp( self ):
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_causal_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" )
        self.assertIsNotNone(model )
@require_tf
class TFRoFormerModelIntegrationTest ( unittest.TestCase ):
    @slow
    def test_inference_masked_lm( self ):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest ( unittest.TestCase ):
    tolerance = 1e-4
    def test_basic( self ):
        input_ids = tf.constant([[4, 10]] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emba_out = emba(input_ids.shape )
        desired_weights = tf.constant(
            [[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
        tf.debugging.assert_near(emba_out , desired_weights , atol=self.tolerance )
    def test_positional_emb_weights_against_roformer( self ):
        desired_weights = tf.constant(
[
[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
[0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
[0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emba([2, 16, 512] )
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest ( unittest.TestCase ):
    tolerance = 1e-4
    def test_apply_rotary_position_embeddings( self ):
        # Build toy query/key tensors of shape (2, 12, 16, 64) and rotate them.
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        sinusoidal_pos = embed_positions([2, 16, 768] )[None, None, :, :]
        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        expected_query_layer = tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
        expected_key_layer = tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , expected_query_layer , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , expected_key_layer , atol=self.tolerance )
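# For intuition: rotary position embeddings rotate each (even, odd) feature pair
# by a position-dependent angle, turning relative offsets into phase differences.
# A minimal NumPy sketch of one rotated pair (an illustration, not the library API):
#   import numpy as np
#   def rotate_pair(x_even, x_odd, theta):
#       return (x_even * np.cos(theta) - x_odd * np.sin(theta),
#               x_even * np.sin(theta) + x_odd * np.cos(theta))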
| 47
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
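# Note: `actual_data` was min-max scaled above, so `pred` lies in [0, 1]. A sketch
# for mapping predictions back to price scale (assumes the fitted scaler is kept,
# e.g. `scaler = MinMaxScaler(); actual_data = scaler.fit_transform(actual_data)`):
#   rescaled = scaler.inverse_transform(pred)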
| 47
| 1
|
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    '''simple docstring'''
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    '''simple docstring'''
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
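# Complexity note: `bfs_shortest_path_distance` enqueues each vertex at most once,
# so it runs in O(V + E). The path variant above can hold duplicate frontier
# entries and copies a path per expansion, so it costs more on dense graphs.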
if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
| 43
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
    def check_results_dict_not_empty( self , results ):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
                result = model_result["""result"""][batch_size][sequence_length]
                self.assertIsNotNone(result )
    def test_inference_no_configs_eager( self ):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs_only_pretrain( self ):
        MODEL_ID = """sgugger/tiny-distilbert-classification"""
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , only_pretrain_model=True , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs( self ):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_with_configs_eager( self ):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_with_configs( self ):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_no_configs( self ):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_train_with_configs( self ):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_inference_encoder_decoder_with_configs( self ):
        MODEL_ID = """patrickvonplaten/t5-tiny-random"""
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , """Cannot do xla on CPU.""" )
    def test_inference_no_configs_xla( self ):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , use_xla=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_save_csv_files( self ):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=True , save_to_csv=True , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(tmp_dir , """inf_time.csv""" ) , inference_memory_csv_file=os.path.join(tmp_dir , """inf_mem.csv""" ) , env_info_csv_file=os.path.join(tmp_dir , """env.csv""" ) , multi_process=False , )
            benchmark = TensorFlowBenchmark(benchmark_args )
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir , """inf_time.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , """inf_mem.csv""" ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , """env.csv""" ) ).exists() )
    def test_trace_memory( self ):
        MODEL_ID = """sshleifer/tiny-gpt2"""
        def _check_summary_is_not_empty(summary ):
            self.assertTrue(hasattr(summary , """sequential""" ) )
            self.assertTrue(hasattr(summary , """cumulative""" ) )
            self.assertTrue(hasattr(summary , """current""" ) )
            self.assertTrue(hasattr(summary , """total""" ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=True , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(tmp_dir , """log.txt""" ) , log_print=True , trace_memory_line_by_line=True , eager_mode=True , multi_process=False , )
            benchmark = TensorFlowBenchmark(benchmark_args )
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            self.assertTrue(Path(os.path.join(tmp_dir , """log.txt""" ) ).exists() )
| 111
| 0
|
'''simple docstring'''
from collections import defaultdict
def dfs(start: int) -> int:
    '''simple docstring'''
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret
def even_tree():
    '''simple docstring'''
    dfs(1)
if __name__ == "__main__":
    n_nodes, n_edges = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
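# The driver encodes the classic "Even Tree" sample: len(cuts) - 1 is the maximum
# number of edges removable so that every remaining component has an even number
# of nodes (2 for this 10-node tree).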
| 104
|
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class a ( unittest.TestCase ):
    def test_cached_files_are_used_when_internet_is_down( self ):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request' , return_value=response_mock ) as mock_head:
            _ = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit' )
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url( self ):
# This test is for deprecated behavior and can be removed in v5
_a = ViTImageProcessor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json' )
    def test_image_processor_from_pretrained_subfolder( self ):
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants' )
        image_processor = AutoImageProcessor.from_pretrained(
            'hf-internal-testing/stable-diffusion-all-variants' , subfolder='feature_extractor' )
        self.assertIsNotNone(image_processor )
@is_staging_test
class a ( unittest.TestCase ):
@classmethod
    def setUpClass( cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass( cls ):
try:
delete_repo(token=cls._token , repo_id='test-image-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-image-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-image-processor' )
except HTTPError:
pass
    def test_push_to_hub( self ):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('test-image-processor' , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='test-image-processor' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id='test-image-processor' , push_to_hub=True , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
    def test_push_to_hub_in_organization( self ):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('valid_org/test-image-processor' , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained('valid_org/test-image-processor' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-image-processor' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir , repo_id='valid_org/test-image-processor-org' , push_to_hub=True , use_auth_token=self._token )
        new_image_processor = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v , getattr(new_image_processor , k ) )
    def test_push_to_hub_dynamic_image_processor( self ):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR )
        image_processor.push_to_hub('test-dynamic-image-processor' , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map , {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'} , )
        new_image_processor = AutoImageProcessor.from_pretrained(
            f'{USER}/test-dynamic-image-processor' , trust_remote_code=True )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ , 'CustomImageProcessor' )
| 104
| 1
|
'''simple docstring'''
def one_pence() -> int:
    return 1
def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()
def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)
def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)
def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)
def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)
def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)
def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)
def solution(x: int = 200) -> int:
    return two_pound(x)
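# This is Project Euler problem 31: counting ways to make 200 pence from the UK
# coin set {1, 2, 5, 10, 20, 50, 100, 200}; solution(200) evaluates to 73682.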
if __name__ == "__main__":
print(solution(int(input().strip())))
| 319
|
import random
def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
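# Example: quick_select([2, 4, 5, 7, 899, 54, 32], 5) returns 54, the 6th-smallest
# element; passing index = len(items) // 2 yields the median in expected O(n) time.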
| 193
| 0
|
'''simple docstring'''
def find_minimum_change(denominations, value):
    '''simple docstring'''
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = '0'
    if (
        input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
        == "y"
    ):
        n = int(input('Enter the number of denominations you want to add: ').strip())
        for i in range(0, n):
            denominations.append(int(input(f'''Denomination {i}: ''').strip()))
        value = input('Enter the change you want to make in Indian Currency: ').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('Enter the change you want to make: ').strip()
if int(value) == 0 or int(value) < 0:
print('The total value cannot be zero or negative.')
else:
print(f'''Following is minimal change for {value}: ''')
    answer = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
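# Example: find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], '987')
# returns [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]; the greedy strategy is
# optimal here because these denominations form a canonical coin system.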
| 243
|
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
a__ : Optional[int] = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
print('Googling.....')
a__ : Optional[int] = f'''https://www.google.com/search?q={query}&num=100'''
a__ : Union[str, Any] = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
a__ : Optional[Any] = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
a__ : Union[str, Any] = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
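# Note: the 'yuRUbf' and 'kCrYT' div classes are Google result-page markup that
# changes periodically, so these selectors may need updating over time.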
| 243
| 1
|
"""simple docstring"""
def exchange_sort(numbers: list[int]) -> list[int]:
    """simple docstring"""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
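# Exchange sort always performs n * (n - 1) / 2 comparisons regardless of input
# order, e.g. exchange_sort([5, 4, 3, 2, 1]) returns [1, 2, 3, 4, 5].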
| 224
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : Any = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class UpperCamelCase__ ( PretrainedConfig ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """dpr"""
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim: int = 0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
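# Minimal usage sketch (this class corresponds to transformers' DPRConfig; the
# defaults above mirror BERT-base):
#   config = UpperCamelCase__(projection_dim=128)
#   assert config.hidden_size == 768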
| 224
| 1
|
def optimal_merge_pattern(files: list) -> float:
    """simple docstring"""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 124
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Union[str, Any] = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class UpperCAmelCase ( PretrainedConfig ):
'''simple docstring'''
snake_case_ = "bloom"
snake_case_ = ["past_key_values"]
snake_case_ = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
    def __init__( self ,vocab_size=250880 ,hidden_size=64 ,n_layer=2 ,n_head=8 ,layer_norm_epsilon=1E-5 ,initializer_range=0.02 ,use_cache=True ,bos_token_id=1 ,eos_token_id=2 ,apply_residual_connection_post_layernorm=False ,hidden_dropout=0.0 ,attention_dropout=0.0 ,pretraining_tp=1 ,slow_but_exact=False ,**kwargs ,):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed" ,None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
class UpperCAmelCase ( OnnxConfigWithPast ):
'''simple docstring'''
snake_case_ = version.parse("1.12" )
    def __init__( self ,config : PretrainedConfig ,task : str = "default" ,patching_specs : List[PatchingSpec] = None ,use_past : bool = False ,):
        super().__init__(config ,task=task ,patching_specs=patching_specs ,use_past=use_past )
        if not getattr(self._config ,"pad_token_id" ,None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
    def inputs( self ):
__A = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(A ,direction="inputs" ,inverted_values_shape=A )
__A = {0: "batch", 1: "past_sequence + sequence"}
else:
__A = {0: "batch", 1: "sequence"}
return common_inputs
@property
    def num_layers( self ):
return self._config.n_layer
@property
    def num_attention_heads( self ):
return self._config.n_head
@property
    def atol_for_validation( self ):
return 1E-3
def UpperCamelCase_ ( self : Any ,A : "PreTrainedTokenizer" ,A : int = -1 ,A : int = -1 ,A : bool = False ,A : Optional["TensorType"] = None ,):
__A = super(A ,self ).generate_dummy_inputs(
A ,batch_size=A ,seq_length=A ,is_pair=A ,framework=A )
# We need to order the input in the way they appears in the forward()
__A = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__A , __A = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__A = seqlen + 2
__A = self._config.hidden_size // self.num_attention_heads
__A = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
__A = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
__A = [
(torch.zeros(A ), torch.zeros(A )) for _ in range(self.num_layers )
]
__A = common_inputs["attention_mask"]
if self.use_past:
__A = ordered_inputs["attention_mask"].dtype
__A = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(A ,A ,dtype=A )] ,dim=1 )
return ordered_inputs
@property
    def default_onnx_opset( self ):
return 13
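# Note: unlike GPT-2-style ONNX caches, BLOOM's past keys are laid out as
# (batch * n_head, head_dim, past_seq_len) with values transposed, which is why
# the inputs property above calls fill_with_past_key_values_ with
# inverted_values_shape=True (see the PR linked in the comment there).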
| 124
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
def a__ ( __SCREAMING_SNAKE_CASE ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a__ ( __SCREAMING_SNAKE_CASE ) -> list[int]:
__lowerCAmelCase: Union[str, Any] = str(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Tuple = [n]
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def a__ ( __SCREAMING_SNAKE_CASE ) -> bool:
if len(str(__SCREAMING_SNAKE_CASE ) ) > 3:
if not is_prime(int(str(__SCREAMING_SNAKE_CASE )[-3:] ) ) or not is_prime(int(str(__SCREAMING_SNAKE_CASE )[:3] ) ):
return False
return True
def a__ ( __SCREAMING_SNAKE_CASE = 1_1 ) -> list[int]:
__lowerCAmelCase: list[int] = []
__lowerCAmelCase: Any = 1_3
while len(__SCREAMING_SNAKE_CASE ) != count:
if validate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: Optional[Any] = list_truncated_nums(__SCREAMING_SNAKE_CASE )
if all(is_prime(__SCREAMING_SNAKE_CASE ) for i in list_nums ):
list_truncated_primes.append(__SCREAMING_SNAKE_CASE )
num += 2
return list_truncated_primes
def a__ ( ) -> int:
return sum(compute_truncated_primes(1_1 ) )
if __name__ == "__main__":
print(F'''{sum(compute_truncated_primes(11)) = }''')
| 217
|
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class snake_case :
def __init__( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any]=0.2 , UpperCamelCase__ : Any=0.2)-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = bp_numa
__lowerCAmelCase: Optional[int] = bp_numa
__lowerCAmelCase: Tuple = bp_numa
__lowerCAmelCase: Optional[int] = conva_get[:2]
__lowerCAmelCase: int = conva_get[2]
__lowerCAmelCase: List[str] = size_pa
__lowerCAmelCase: Tuple = rate_w
__lowerCAmelCase: Dict = rate_t
__lowerCAmelCase: List[Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
__lowerCAmelCase: Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
__lowerCAmelCase: int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
__lowerCAmelCase: Optional[Any] = -2 * np.random.rand(self.conva[1]) + 1
__lowerCAmelCase: int = -2 * np.random.rand(self.num_bpa) + 1
__lowerCAmelCase: str = -2 * np.random.rand(self.num_bpa) + 1
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : int)-> List[str]:
'''simple docstring'''
__lowerCAmelCase: Any = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(UpperCamelCase__ , "wb") as f:
pickle.dump(UpperCamelCase__ , UpperCamelCase__)
print(f"Model saved: {save_path}")
@classmethod
def lowercase_ ( cls : Dict , UpperCamelCase__ : Union[str, Any])-> List[Any]:
'''simple docstring'''
with open(UpperCamelCase__ , "rb") as f:
__lowerCAmelCase: Dict = pickle.load(UpperCamelCase__) # noqa: S301
__lowerCAmelCase: Optional[int] = model_dic.get("conv1")
conv_get.append(model_dic.get("step_conv1"))
__lowerCAmelCase: List[str] = model_dic.get("size_pooling1")
__lowerCAmelCase: Union[str, Any] = model_dic.get("num_bp1")
__lowerCAmelCase: Any = model_dic.get("num_bp2")
__lowerCAmelCase: Union[str, Any] = model_dic.get("num_bp3")
__lowerCAmelCase: Optional[int] = model_dic.get("rate_weight")
__lowerCAmelCase: int = model_dic.get("rate_thre")
# create model instance
__lowerCAmelCase: Tuple = CNN(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
# modify model parameter
__lowerCAmelCase: Any = model_dic.get("w_conv1")
__lowerCAmelCase: Optional[Any] = model_dic.get("wkj")
__lowerCAmelCase: Any = model_dic.get("vji")
__lowerCAmelCase: Dict = model_dic.get("thre_conv1")
__lowerCAmelCase: int = model_dic.get("thre_bp2")
__lowerCAmelCase: Optional[int] = model_dic.get("thre_bp3")
return conv_ins
def lowercase_ ( self : Dict , UpperCamelCase__ : List[Any])-> List[Any]:
'''simple docstring'''
return 1 / (1 + np.exp(-1 * x))
def lowercase_ ( self : Dict , UpperCamelCase__ : List[Any])-> Optional[Any]:
'''simple docstring'''
return round(UpperCamelCase__ , 3)
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int)-> Dict:
'''simple docstring'''
__lowerCAmelCase: List[Any] = convs[0]
__lowerCAmelCase: int = convs[1]
__lowerCAmelCase: Union[str, Any] = np.shape(UpperCamelCase__)[0]
# get the data slice of original image data, data_focus
__lowerCAmelCase: Optional[Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , UpperCamelCase__):
for j_focus in range(0 , size_data - size_conv + 1 , UpperCamelCase__):
__lowerCAmelCase: Union[str, Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(UpperCamelCase__)
# calculate the feature map of every single kernel, and saved as list of matrix
__lowerCAmelCase: int = []
__lowerCAmelCase: Optional[int] = int((size_data - size_conv) / conv_step + 1)
for i_map in range(UpperCamelCase__):
__lowerCAmelCase: List[str] = []
for i_focus in range(len(UpperCamelCase__)):
__lowerCAmelCase: Union[str, Any] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(UpperCamelCase__))
__lowerCAmelCase: str = np.asmatrix(UpperCamelCase__).reshape(
UpperCamelCase__ , UpperCamelCase__)
data_featuremap.append(UpperCamelCase__)
# expanding the data slice to One dimenssion
__lowerCAmelCase: Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(UpperCamelCase__))
__lowerCAmelCase: List[Any] = np.asarray(UpperCamelCase__)
return focus_list, data_featuremap
def lowercase_ ( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any]="average_pool")-> str:
'''simple docstring'''
__lowerCAmelCase: Tuple = len(featuremaps[0])
__lowerCAmelCase: List[Any] = int(size_map / size_pooling)
__lowerCAmelCase: int = []
for i_map in range(len(UpperCamelCase__)):
__lowerCAmelCase: str = featuremaps[i_map]
__lowerCAmelCase: List[Any] = []
for i_focus in range(0 , UpperCamelCase__ , UpperCamelCase__):
for j_focus in range(0 , UpperCamelCase__ , UpperCamelCase__):
__lowerCAmelCase: Any = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(UpperCamelCase__))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(UpperCamelCase__))
__lowerCAmelCase: Optional[int] = np.asmatrix(UpperCamelCase__).reshape(UpperCamelCase__ , UpperCamelCase__)
featuremap_pooled.append(UpperCamelCase__)
return featuremap_pooled
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : str)-> int:
'''simple docstring'''
__lowerCAmelCase: List[Any] = []
for i in range(len(UpperCamelCase__)):
__lowerCAmelCase: Union[str, Any] = np.shape(data[i])
__lowerCAmelCase: int = data[i].reshape(1 , shapes[0] * shapes[1])
__lowerCAmelCase: Dict = data_listed.getA().tolist()[0]
data_expanded.extend(UpperCamelCase__)
__lowerCAmelCase: Any = np.asarray(UpperCamelCase__)
return data_expanded
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Dict = np.asarray(UpperCamelCase__)
__lowerCAmelCase: Optional[int] = np.shape(UpperCamelCase__)
__lowerCAmelCase: Optional[int] = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def lowercase_ ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict)-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = []
__lowerCAmelCase: Any = 0
for i_map in range(UpperCamelCase__):
__lowerCAmelCase: Optional[Any] = np.ones((size_map, size_map))
for i in range(0 , UpperCamelCase__ , UpperCamelCase__):
for j in range(0 , UpperCamelCase__ , UpperCamelCase__):
__lowerCAmelCase: Optional[Any] = pd_pool[
i_pool
]
__lowerCAmelCase: str = i_pool + 1
__lowerCAmelCase: Dict = np.multiply(
UpperCamelCase__ , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(UpperCamelCase__)
return pd_all
def lowercase_ ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str=bool)-> List[str]:
'''simple docstring'''
print("----------------------Start Training-------------------------")
print((" - - Shape: Train_Data ", np.shape(UpperCamelCase__)))
print((" - - Shape: Teach_Data ", np.shape(UpperCamelCase__)))
__lowerCAmelCase: str = 0
__lowerCAmelCase: Optional[int] = []
__lowerCAmelCase: List[Any] = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
__lowerCAmelCase: Optional[Any] = 0
print(f"-------------Learning Time {rp}--------------")
for p in range(len(UpperCamelCase__)):
# print('------------Learning Image: %d--------------'%p)
__lowerCAmelCase: Dict = np.asmatrix(datas_train[p])
__lowerCAmelCase: Dict = np.asarray(datas_teach[p])
__lowerCAmelCase , __lowerCAmelCase: int = self.convolute(
UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowerCAmelCase: Any = self.pooling(UpperCamelCase__ , self.size_poolinga)
__lowerCAmelCase: Optional[Any] = np.shape(UpperCamelCase__)
__lowerCAmelCase: str = self._expand(UpperCamelCase__)
__lowerCAmelCase: str = data_bp_input
__lowerCAmelCase: int = np.dot(UpperCamelCase__ , self.vji.T) - self.thre_bpa
__lowerCAmelCase: int = self.sig(UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = np.dot(UpperCamelCase__ , self.wkj.T) - self.thre_bpa
__lowerCAmelCase: str = self.sig(UpperCamelCase__)
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
__lowerCAmelCase: Union[str, Any] = np.multiply(
(data_teach - bp_outa) , np.multiply(UpperCamelCase__ , (1 - bp_outa)))
__lowerCAmelCase: Any = np.multiply(
np.dot(UpperCamelCase__ , self.wkj) , np.multiply(UpperCamelCase__ , (1 - bp_outa)))
__lowerCAmelCase: str = np.dot(UpperCamelCase__ , self.vji)
__lowerCAmelCase: Union[str, Any] = pd_i_all / (self.size_poolinga * self.size_poolinga)
__lowerCAmelCase: str = pd_conva_pooled.T.getA().tolist()
__lowerCAmelCase: str = self._calculate_gradient_from_pool(
UpperCamelCase__ , UpperCamelCase__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
__lowerCAmelCase: List[Any] = self._expand_mat(pd_conva_all[k_conv])
__lowerCAmelCase: int = self.rate_weight * np.dot(UpperCamelCase__ , UpperCamelCase__)
__lowerCAmelCase: Tuple = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
__lowerCAmelCase: Tuple = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# all connected layer
__lowerCAmelCase: List[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
__lowerCAmelCase: Union[str, Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
__lowerCAmelCase: Tuple = self.thre_bpa - pd_k_all * self.rate_thre
__lowerCAmelCase: Optional[int] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
__lowerCAmelCase: List[str] = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
__lowerCAmelCase: Tuple = rp + 1
__lowerCAmelCase: Optional[Any] = error_count / patterns
all_mse.append(UpperCamelCase__)
def draw_error():
__lowerCAmelCase: Dict = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(UpperCamelCase__ , "+-")
plt.plot(UpperCamelCase__ , "r--")
plt.xlabel("Learning Times")
plt.ylabel("All_mse")
plt.grid(UpperCamelCase__ , alpha=0.5)
plt.show()
print("------------------Training Complished---------------------")
print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
if draw_e:
draw_error()
return mse
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : Tuple)-> List[str]:
'''simple docstring'''
__lowerCAmelCase: int = []
print("-------------------Start Testing-------------------------")
print((" - - Shape: Test_Data ", np.shape(UpperCamelCase__)))
for p in range(len(UpperCamelCase__)):
__lowerCAmelCase: Dict = np.asmatrix(datas_test[p])
__lowerCAmelCase , __lowerCAmelCase: Optional[int] = self.convolute(
UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowerCAmelCase: Tuple = self.pooling(UpperCamelCase__ , self.size_poolinga)
__lowerCAmelCase: List[str] = self._expand(UpperCamelCase__)
__lowerCAmelCase: int = data_bp_input
__lowerCAmelCase: List[Any] = bp_outa * self.vji.T - self.thre_bpa
__lowerCAmelCase: Any = self.sig(UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = bp_outa * self.wkj.T - self.thre_bpa
__lowerCAmelCase: List[str] = self.sig(UpperCamelCase__)
produce_out.extend(bp_outa.getA().tolist())
__lowerCAmelCase: Tuple = [list(map(self.do_round , UpperCamelCase__)) for each in produce_out]
return np.asarray(UpperCamelCase__)
def lowercase_ ( self : int , UpperCamelCase__ : Any)-> Any:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = np.asmatrix(UpperCamelCase__)
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = self.convolute(
UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__lowerCAmelCase: Any = self.pooling(UpperCamelCase__ , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 217
| 1
|
def lowerCAmelCase__ ( lowerCamelCase_ : int):
'''simple docstring'''
if not isinstance(lowerCamelCase_ ,lowerCamelCase_):
raise ValueError('''Input must be an integer''')
if input_num <= 0:
raise ValueError('''Input must be positive''')
return sum(
divisor for divisor in range(1 ,input_num // 2 + 1) if input_num % divisor == 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 94
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case : Tuple =logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""transformer.blocks.{i}.norm1.weight""", f"""vilt.encoder.layer.{i}.layernorm_before.weight"""))
rename_keys.append((f"""transformer.blocks.{i}.norm1.bias""", f"""vilt.encoder.layer.{i}.layernorm_before.bias"""))
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.weight""", f"""vilt.encoder.layer.{i}.attention.output.dense.weight"""))
rename_keys.append(
(f"""transformer.blocks.{i}.attn.proj.bias""", f"""vilt.encoder.layer.{i}.attention.output.dense.bias"""))
rename_keys.append((f"""transformer.blocks.{i}.norm2.weight""", f"""vilt.encoder.layer.{i}.layernorm_after.weight"""))
rename_keys.append((f"""transformer.blocks.{i}.norm2.bias""", f"""vilt.encoder.layer.{i}.layernorm_after.bias"""))
rename_keys.append(
(f"""transformer.blocks.{i}.mlp.fc1.weight""", f"""vilt.encoder.layer.{i}.intermediate.dense.weight"""))
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc1.bias""", f"""vilt.encoder.layer.{i}.intermediate.dense.bias"""))
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.weight""", f"""vilt.encoder.layer.{i}.output.dense.weight"""))
rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.bias""", f"""vilt.encoder.layer.{i}.output.dense.bias"""))
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
])
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
])
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
])
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
])
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    '''
    Copy/paste/tweak the original ViLT weights to our ViLT structure.
    '''
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = '''huggingface/label-files'''
        filename = '''vqa2-id2label.json'''
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.idalabel = {0: '''False''', 1: '''True'''}
        config.labelaid = {v: k for k, v in config.idalabel.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError('''Unknown model type''')

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')['''state_dict''']
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''')
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image_a = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''', stream=True).raw)
        image_a_bis = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''', stream=True).raw)
        text = (
            '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
            ''' standing.'''
        )
        encoding_a = processor(image_a, text, return_tensors='''pt''')
        encoding_a_bis = processor(image_a_bis, text, return_tensors='''pt''')
        outputs = model(
            input_ids=encoding_a.input_ids, pixel_values=encoding_a.pixel_values, pixel_values_a=encoding_a_bis.pixel_values,)
    else:
        image = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''', stream=True).raw)
        if mlm_model:
            text = '''a bunch of [MASK] laying on a [MASK].'''
        else:
            text = '''How many cats are there?'''
        encoding = processor(image, text, return_tensors='''pt''')
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1E-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4)

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.idalabel[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model and processor to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
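# --- Hedged usage note (not part of the original script) ---
# Typical invocation with the default MLM+ITM checkpoint configured above;
# the script file name follows the transformers conversion-script convention
# and the dump folder is a placeholder:
#
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm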
"""simple docstring"""
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_0_0_0_0_0_0) -> int:
    """
    Return the longest sum of consecutive primes below the ceiling that is
    itself prime (Project Euler problem 50).
    """
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
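# --- Hedged sanity check (not part of the original file) ---
# Project Euler's problem statement gives a small case to verify against:
# below one hundred, the longest sum of consecutive primes that is itself
# prime is 41 = 2 + 3 + 5 + 7 + 11 + 13, so solution(100) should return 41.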
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"""Max diff is {diff}""")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"""Max diff is {diff}""")
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = 99
def lowercase (self ) -> Any:
_snake_case = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
_snake_case = input_ids.shape[0]
_snake_case = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def lowercase (self ) -> Optional[Any]:
_snake_case, _snake_case, _snake_case = self._get_config_and_data()
_snake_case = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase )
_snake_case = lm_model(input_ids=UpperCAmelCase )
_snake_case = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , UpperCAmelCase )
def lowercase (self ) -> int:
_snake_case = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
_snake_case = FlaxBlenderbotForConditionalGeneration(UpperCAmelCase )
_snake_case = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
_snake_case = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
_snake_case = lm_model(input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase )
_snake_case = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , UpperCAmelCase )
def lowercase (self ) -> Tuple:
_snake_case = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
_snake_case = shift_tokens_right(UpperCAmelCase , 1 , 2 )
_snake_case = np.equal(UpperCAmelCase , 1 ).astype(np.floataa ).sum()
_snake_case = np.equal(UpperCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(UpperCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("""JIT Enabled"""):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("""JIT Disabled"""):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["""input_ids"""], inputs_dict["""attention_mask"""])

                prepared_inputs_dict = {
                    """decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
                    """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
                    """encoder_outputs""": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )

                with self.subTest("""JIT Enabled"""):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("""JIT Disabled"""):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""" )
@slow
def lowercase (self ) -> Dict:
_snake_case = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 15, """max_length""": 25}
_snake_case = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True}
_snake_case = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=UpperCAmelCase )
_snake_case = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""" )
_snake_case = ["""Sam"""]
_snake_case = tokenizer(UpperCAmelCase , return_tensors="""jax""" )
_snake_case = model.generate(**UpperCAmelCase , **UpperCAmelCase )
_snake_case = """Sam is a great name. It means \"sun\" in Gaelic."""
_snake_case = tokenizer.batch_decode(UpperCAmelCase , **UpperCAmelCase )
assert generated_txt[0].strip() == tgt_text
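# --- Hedged usage note (not part of the original file) ---
# These tests are collected by pytest from a transformers checkout; the path
# below is assumed from the repo's usual layout:
#
#   RUN_SLOW=1 pytest tests/models/blenderbot/test_modeling_flax_blenderbot.py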
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
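# --- Hedged usage note (not part of the original script) ---
# Example invocation; every value below is a placeholder:
#
#   python convert_wav2vec2_s3prl_original_to_pytorch.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted-model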
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """
        The coefficients are given in order of degree, from smallest to largest.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
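# --- Hedged usage sketch (not part of the original file) ---
# Coefficients run from the constant term upward, so [1, 0, 3] is 3x^2 + 1.
if __name__ == "__main__":
    p = Polynomial(2, [1, 0, 3])  # 3x^2 + 1
    q = Polynomial(1, [0, 2])  # 2x
    print(p + q)  # 3x^2 + 2x + 1
    print(p.evaluate(2))  # 13
    print(p.derivative())  # 6x
    print(p.integral())  # 1.0x^3 + 1.0x (integration constant defaults to 0)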
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """
    Solve X_L = 2 * pi * f * L for whichever of the three quantities is
    passed as 0, returning it in a single-entry dict.
    """
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
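# --- Hedged usage sketch (not part of the original file) ---
# Pass 0 for exactly one argument and the function solves X_L = 2*pi*f*L
# for it, e.g.:
#   ind_reactance(0, 10_000, 50)   -> {'inductance': 50 / (2*pi*10_000)}
#   ind_reactance(0.035, 1_000, 0) -> {'reactance': 2*pi*1_000*0.035} (~219.9)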
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
SCREAMING_SNAKE_CASE :Union[str, Any] = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True)
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url)
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
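# --- Hedged usage sketch (not part of the original file) ---
# How a dataset test might wire this mock in place of the real download
# manager; the dataset name, version and URL below are placeholders.
#
#   dl_manager = MockDownloadManager("squad", config=None, version="1.0.0")
#   local_paths = dl_manager.download_and_extract({"train": "https://example.com/train.json"})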
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"""openai/imagegpt-small""": """""",
"""openai/imagegpt-medium""": """""",
"""openai/imagegpt-large""": """""",
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=5_1_2 + 1,  # add one for the start-of-sentence (sos) token
        n_positions=3_2 * 3_2,
        n_embd=5_1_2,
        n_layer=2_4,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ])

    def generate_dummy_inputs(
        self, preprocessor, batch_size=1, seq_length=-1, is_pair=False, framework=None, num_channels=3, image_width=3_2, image_height=3_2, ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
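# --- Hedged usage sketch (not part of the original file) ---
# Instantiating the config with defaults gives the small ImageGPT variant;
# attribute_map lets generic code read `hidden_size` while the config stores
# the value under `n_embd` internally.
#
#   config = ImageGPTConfig()
#   assert config.hidden_size == config.n_embd == 512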
def merge_sort(collection):
    """Sorts by repeatedly moving the current minimum to a prefix list and the
    current maximum to a suffix list, then stitching the three parts together."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(*merge_sort(unsorted), sep=""",""")
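# --- Hedged note (not part of the original file) ---
# Despite the name, this is a min/max selection sort: each pass removes the
# smallest and largest remaining elements, so it takes O(n^2) comparisons,
# e.g. merge_sort([5, 1, 4, 2, 3]) -> [1, 2, 3, 4, 5].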
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f'''{pt_version} ({pt_cuda_available})''',
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()]) + "\n"
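# --- Hedged usage note (not part of the original file) ---
# This command backs the CLI's environment report, typically invoked as:
#
#   diffusers-cli env
#
# which prints the version table above for pasting into bug reports.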
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_glpn': ['GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GLPNConfig']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_glpn'] = ['GLPNFeatureExtractor']
    _import_structure['image_processing_glpn'] = ['GLPNImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_glpn'] = [
        'GLPN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GLPNForDepthEstimation',
        'GLPNLayer',
        'GLPNModel',
        'GLPNPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
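# --- Hedged note (not part of the original file) ---
# The _LazyModule swap means `from ...models.glpn import GLPNModel` only
# imports the torch-backed code on first attribute access; the eager imports
# under TYPE_CHECKING keep static type checkers and IDEs happy.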
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a: List[str] = logging.get_logger(__name__)
__a: List[Any] = {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
),
"""distilbert-base-uncased-finetuned-sst-2-english""": (
"""https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.0_2,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ])
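# --- Hedged usage sketch (not part of the original file) ---
# The attribute_map lets callers use the generic names: reading
# `config.hidden_size` returns `config.dim`, and `num_hidden_layers`
# maps to `n_layers`.
#
#   config = DistilBertConfig()
#   assert config.hidden_size == config.dim == 768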
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = "utf-8"
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = True # deprecated
SCREAMING_SNAKE_CASE = None # deprecated
SCREAMING_SNAKE_CASE = 1_0 << 2_0 # 10MB
SCREAMING_SNAKE_CASE = None
class UpperCAmelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = JsonConfig
def _lowerCAmelCase( self ) -> Any:
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
lowercase__ : Tuple = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Optional[Any]:
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
lowercase__ : List[str] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__lowerCAmelCase , (str, list, tuple) ):
lowercase__ : List[str] = data_files
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase__ : Union[str, Any] = [files]
lowercase__ : Dict = [dl_manager.iter_files(__lowerCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
lowercase__ : str = []
for split_name, files in data_files.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase__ : List[str] = [files]
lowercase__ : Optional[Any] = [dl_manager.iter_files(__lowerCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=__lowerCAmelCase , gen_kwargs={'''files''': files} ) )
return splits
def _lowerCAmelCase( self , __lowerCAmelCase ) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
lowercase__ : Optional[int] = self.config.features.arrow_schema.field(__lowerCAmelCase ).type
lowercase__ : Union[str, Any] = pa_table.append_column(__lowerCAmelCase , pa.array([None] * len(__lowerCAmelCase ) , type=__lowerCAmelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
lowercase__ : Dict = table_cast(__lowerCAmelCase , self.config.features.arrow_schema )
return pa_table
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Optional[int]:
for file_idx, file in enumerate(itertools.chain.from_iterable(__lowerCAmelCase ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contains the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
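

# Standalone sketch of the grow-the-block-size retry used above (illustrative,
# not part of the builder). pyarrow raises ArrowInvalid mentioning a
# "straddling" object when one JSON object is larger than the parser's
# block_size, so the block size is doubled and the parse retried.
def _read_jsonl_bytes(batch: bytes, block_size: int = 16 << 10):
    while True:
        try:
            return paj.read_json(io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
        except pa.ArrowInvalid as e:
            if "straddling" not in str(e) or block_size > len(batch):
                raise  # a genuine parse error, not a too-small block
            block_size *= 2  # retry with a bigger block; cheap compared to re-reading the file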
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    r"""
    Constructs a processor that wraps a BLIP image processor, a language-model tokenizer, and a Q-Former tokenizer
    into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the language-model tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the language-model tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overridden to save the Q-Former tokenizer in its own subfolder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overridden to load the Q-Former tokenizer from its subfolder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
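

# Hypothetical usage sketch (the checkpoint name and image path are
# assumptions for illustration, not taken from this module):
#   from transformers import InstructBlipProcessor
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=Image.open("example.jpg"), text="Describe the image.", return_tensors="pt")
#   # keys: pixel_values, input_ids, attention_mask, qformer_input_ids, qformer_attention_mask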
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        expected_encoding = UpperCAmelCase  # alias the verbatim literal above under a descriptive name
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e", )
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int) -> bytes:
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
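

# Standalone sketch of the magic-number idea above (illustrative, not part of
# the library): identify a file's compression format from its leading bytes.
_SNIFF_MAGIC = {b"\x1f\x8b": "gzip", b"PK\x03\x04": "zip", b"\xfd7zXZ\x00": "xz"}


def _sniff_format(path: str) -> Optional[str]:
    with open(path, "rb") as f:
        head = f.read(max(len(m) for m in _SNIFF_MAGIC))
    for magic_bytes, fmt in _SNIFF_MAGIC.items():
        if head.startswith(magic_bytes):
            return fmt
    return None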
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        # Fix for CVE-2007-4559: skip members whose resolved paths or link
        # targets escape the extraction directory.

        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)
        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1f\x8b"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)


class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xfd\x37\x7a\x58\x5a\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2f\xfd"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5a\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7a\xbc\xaf\x27\x1c"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4d\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(cls, input_path: Union[Path, str], output_path: Union[Path, str], extractor_format: Optional[str] = None, extractor: Optional[BaseExtractor] = "deprecated", ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
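

# Hypothetical usage sketch (paths are placeholders, not part of the library):
# sniff the archive format from its magic number, then extract it.
def _demo_extract(archive_path: str = "archive.tar.gz", output_dir: str = "extracted/archive") -> str:
    extractor_format = Extractor.infer_extractor_format(archive_path)  # e.g. "gzip"
    if extractor_format:
        Extractor.extract(archive_path, output_dir, extractor_format=extractor_format)
        return output_dir
    return archive_path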
from __future__ import annotations
import requests
valid_terms = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def get_subreddit_data(subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None) -> dict:
    """
    subreddit : Subreddit to query
    limit : Number of posts to fetch
    age : ["new", "top", "hot"]
    wanted_data : Get only the required data in the list
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
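

# A possible retry-with-backoff wrapper for the 429 (rate limited) case. This
# is an illustrative sketch, not part of the original scraper; the sleep
# schedule is an assumption.
def get_subreddit_data_with_backoff(subreddit: str, retries: int = 3, **kwargs) -> dict:
    import time

    for attempt in range(retries):
        try:
            return get_subreddit_data(subreddit, **kwargs)
        except requests.HTTPError:
            time.sleep(2**attempt)  # wait 1s, 2s, 4s between attempts
    raise requests.HTTPError("still rate limited after retries")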
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
from collections import deque
def tarjan(g):
    """
    Tarjan's algorithm: compute the strongly connected components of a directed
    graph ``g`` (given as adjacency lists) in O(V + E) time.
    """
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components


def create_graph(n, edges):
    """Build adjacency lists for ``n`` vertices from a list of (u, v) edges."""
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
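
    # A second, hypothetical graph to illustrate cycle detection: any SCC with
    # more than one vertex means those vertices lie on a directed cycle.
    g2 = create_graph(4, [(0, 1), (1, 2), (2, 0), (2, 3)])
    assert tarjan(g2) == [[3], [2, 1, 0]]  # 0 -> 1 -> 2 -> 0 is a cycle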
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"A red cartoon frog, 4k\"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16
... )
>>> pipe.to(\"cuda\")
>>> init_image = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/frog.png\"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save(\"red_frog.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
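

# Quick sanity check of the rounding above (with the default scale_factor=8,
# scale_factor**2 == 64): downscale_height_and_width(768, 768) == (96, 96),
# while a partial block rounds up: for height 769, 769 // 64 + 1 == 13 and
# 13 * 8 == 104.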
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """
    Image-to-image generation pipeline for Kandinsky 2.2.

    Args:
        scheduler ([`DDPMScheduler`]): scheduler used with `unet` to generate image latents.
        unet ([`UNet2DConditionModel`]): conditional U-Net that denoises the image embedding.
        movq ([`VQModel`]): MoVQ decoder that turns latents into images.
    """

    def __init__(self, unet: UNet2DConditionModel, scheduler: DDPMScheduler, movq: VQModel, ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
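
    # Worked example for get_timesteps (hypothetical numbers): with
    # num_inference_steps=100 and strength=0.2, init_timestep == 20 and
    # t_start == 80, so only the last 20 scheduler steps run; a higher strength
    # keeps more noise and departs further from the input image.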
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}")
        image = image.to(device=device, dtype=dtype)
        batch_size = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators.")
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)
            init_latents = self.movq.config.scaling_factor * init_latents
        init_latents = torch.cat([init_latents], dim=0)
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload submodules to CPU via accelerate to reduce GPU memory usage."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole models to CPU, moving each to GPU only when its forward runs."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image_embeds, image, negative_image_embeds, height: int = 512, width: int = 512, num_inference_steps: int = 100, guidance_scale: float = 4.0, strength: float = 0.3, num_images_per_prompt: int = 1, generator=None, output_type: str = "pil", return_dict: bool = True, ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor")
        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)
        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=None, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)
        model_kwargs = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model
    @property
    def dummy_decoder(self):
        torch.manual_seed(0)
        model_kwargs = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def dummy_super_res_last(self):
        # seeded differently to get a different unet than `dummy_super_res_first`
        torch.manual_seed(1)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last
        decoder_scheduler = UnCLIPScheduler(
            variance_type="learned_range", prediction_type="epsilon", num_train_timesteps=1000, )
        super_res_scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="epsilon", num_train_timesteps=1000, )
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        image_encoder = self.dummy_image_encoder
        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
    def test_unclip_image_variation_input_tensor(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        output = pipe(**pipeline_inputs)
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs, return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [
                0.9997,
                0.0002,
                0.9997,
                0.9997,
                0.9969,
                0.0023,
                0.9997,
                0.9969,
                0.9970,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_unclip_image_variation_input_image(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        output = pipe(**pipeline_inputs)
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        image_from_tuple = pipe(
            **tuple_pipeline_inputs, return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_image_variation_input_list_images(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        pipeline_inputs["image"] = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]
        output = pipe(**pipeline_inputs)
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True)
        tuple_pipeline_inputs["image"] = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]
        image_from_tuple = pipe(
            **tuple_pipeline_inputs, return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (2, 64, 64, 3)
        expected_slice = np.array(
            [
                0.9997,
                0.9989,
                0.0008,
                0.0021,
                0.9960,
                0.0018,
                0.0014,
                0.0002,
                0.9933,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_unclip_passed_image_embed(self):
        device = torch.device("cpu")

        class DummyScheduler:
            init_noise_sigma = 1

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(0)
        dtype = pipe.decoder.dtype
        batch_size = 1
        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler())
        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler())
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        img_out_1 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents).images
        pipeline_inputs = self.get_dummy_inputs(device, pil_image=False)
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop("image")
        image_embeddings = pipe.image_encoder(image).image_embeds
        img_out_2 = pipe(
            **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents, image_embeddings=image_embeddings, ).images
        # make sure passing the image embeddings manually is identical
        assert np.abs(img_out_1 - img_out_2).max() < 1e-4
    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, expected_max_diff=expected_max_diff)

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, )

    def test_inference_batch_consistent(self):
        additional_params_copy_to_batched_inputs = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes, additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs)

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy")
        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(image, expected_image, 15)
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[min, max])
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False).squeeze(0)
            img_augs.append(img)
        return img_augs
class SCREAMING_SNAKE_CASE__ :
def __init__( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
UpperCAmelCase : List[Any] = cfg.INPUT.FORMAT
UpperCAmelCase : Dict = cfg.SIZE_DIVISIBILITY
UpperCAmelCase : Dict = cfg.PAD_VALUE
UpperCAmelCase : Tuple = cfg.INPUT.MAX_SIZE_TEST
UpperCAmelCase : Any = cfg.MODEL.DEVICE
UpperCAmelCase : List[Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
UpperCAmelCase : List[str] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
UpperCAmelCase : Optional[Any] = lambda _SCREAMING_SNAKE_CASE : (x - self.pixel_mean) / self.pixel_std
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : List[Any] = tuple(max(_SCREAMING_SNAKE_CASE ) for s in zip(*[img.shape for img in images] ) )
UpperCAmelCase : List[Any] = [im.shape[-2:] for im in images]
UpperCAmelCase : Any = [
nn.functional.pad(
_SCREAMING_SNAKE_CASE , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
]
return torch.stack(_SCREAMING_SNAKE_CASE ), torch.tensor(_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
'''simple docstring'''
with torch.no_grad():
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase : int = [images]
if single_image:
assert len(_SCREAMING_SNAKE_CASE ) == 1
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(_SCREAMING_SNAKE_CASE , images.pop(_SCREAMING_SNAKE_CASE ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
_SCREAMING_SNAKE_CASE , torch.as_tensor(img_tensorize(images.pop(_SCREAMING_SNAKE_CASE ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
UpperCAmelCase : str = torch.tensor([im.shape[:2] for im in images] )
UpperCAmelCase : int = self.aug(_SCREAMING_SNAKE_CASE )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
            UpperCAmelCase : Any = [self.normalizer(x ) for x in images]
# now pad them to do the following operations
            UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.pad(images )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
            UpperCAmelCase : int = torch.true_divide(raw_sizes , sizes )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def _snake_case ( UpperCamelCase : Dict , UpperCamelCase : Dict ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def _snake_case ( UpperCamelCase : Any , UpperCamelCase : Tuple[int, int] ):
assert torch.isfinite(UpperCamelCase ).all(), "Box tensor contains infinite or NaN!"
UpperCAmelCase , UpperCAmelCase : str = box_size
    tensor[:, 0].clamp_(min=0 , max=w )
    tensor[:, 1].clamp_(min=0 , max=h )
    tensor[:, 2].clamp_(min=0 , max=w )
    tensor[:, 3].clamp_(min=0 , max=h )
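# A minimal sketch of how the two helpers above are used together (values are
# made up): boxes predicted on the resized image are mapped back to the raw
# image with the (y, x) scales returned by Preprocess, then clipped to its size.
import torch

boxes = torch.tensor([[10.0, 20.0, 120.0, 90.0]])  # (x0, y0, x1, y1)
scales_yx = torch.tensor([[2.0, 2.0]])             # raw_size / resized_size per image
boxes[:, 0::2] *= scales_yx[:, 1]                  # x coordinates use the x scale
boxes[:, 1::2] *= scales_yx[:, 0]                  # y coordinates use the y scale
boxes[:, 0].clamp_(min=0, max=200)                 # then clip to raw (h, w) = (160, 200)
boxes[:, 2].clamp_(min=0, max=200)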
| 76
| 0
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
lowerCAmelCase : List[str] = logging.get_logger(__name__)
def A_ ( image_embeds , text_embeds ):
    SCREAMING_SNAKE_CASE_: Optional[Any] = nn.functional.normalize(image_embeds )
    SCREAMING_SNAKE_CASE_: List[Any] = nn.functional.normalize(text_embeds )
    return torch.mm(normalized_image_embeds , normalized_text_embeds.t() )
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : str = CLIPConfig
_UpperCAmelCase : Optional[int] = ['''CLIPEncoderLayer''']
def __init__( self : List[Any] , lowerCAmelCase__ : CLIPConfig):
super().__init__(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = CLIPVisionModel(config.vision_config)
        SCREAMING_SNAKE_CASE_: List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=False)
        SCREAMING_SNAKE_CASE_: str = nn.Parameter(torch.ones(17 , config.projection_dim) , requires_grad=False)
        SCREAMING_SNAKE_CASE_: List[str] = nn.Parameter(torch.ones(3 , config.projection_dim) , requires_grad=False)
        SCREAMING_SNAKE_CASE_: str = nn.Parameter(torch.ones(17) , requires_grad=False)
        SCREAMING_SNAKE_CASE_: Tuple = nn.Parameter(torch.ones(3) , requires_grad=False)
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: List[Any] = self.vision_model(lowerCAmelCase__)[1] # pooled_output
SCREAMING_SNAKE_CASE_: Tuple = self.visual_projection(lowerCAmelCase__)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE_: Dict = cosine_distance(lowerCAmelCase__ , self.special_care_embeds).cpu().float().numpy()
SCREAMING_SNAKE_CASE_: Optional[Any] = cosine_distance(lowerCAmelCase__ , self.concept_embeds).cpu().float().numpy()
SCREAMING_SNAKE_CASE_: Optional[int] = []
SCREAMING_SNAKE_CASE_: List[Any] = image_embeds.shape[0]
for i in range(lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: Dict = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
SCREAMING_SNAKE_CASE_: List[Any] = 0.0
for concept_idx in range(len(special_cos_dist[0])):
SCREAMING_SNAKE_CASE_: int = special_cos_dist[i][concept_idx]
SCREAMING_SNAKE_CASE_: Union[str, Any] = self.special_care_embeds_weights[concept_idx].item()
SCREAMING_SNAKE_CASE_: str = round(concept_cos - concept_threshold + adjustment , 3)
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
SCREAMING_SNAKE_CASE_: Tuple = 0.01
for concept_idx in range(len(cos_dist[0])):
SCREAMING_SNAKE_CASE_: List[str] = cos_dist[i][concept_idx]
SCREAMING_SNAKE_CASE_: Any = self.concept_embeds_weights[concept_idx].item()
SCREAMING_SNAKE_CASE_: Optional[int] = round(concept_cos - concept_threshold + adjustment , 3)
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowerCAmelCase__)
result.append(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = [len(res["bad_concepts"]) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : torch.FloatTensor , lowerCAmelCase__ : torch.FloatTensor):
SCREAMING_SNAKE_CASE_: Optional[Any] = self.vision_model(lowerCAmelCase__)[1] # pooled_output
SCREAMING_SNAKE_CASE_: Tuple = self.visual_projection(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = cosine_distance(lowerCAmelCase__ , self.special_care_embeds)
SCREAMING_SNAKE_CASE_: str = cosine_distance(lowerCAmelCase__ , self.concept_embeds)
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
SCREAMING_SNAKE_CASE_: Dict = 0.0
SCREAMING_SNAKE_CASE_: List[str] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
SCREAMING_SNAKE_CASE_: List[str] = torch.any(special_scores > 0 , dim=1)
SCREAMING_SNAKE_CASE_: str = special_care * 0.01
SCREAMING_SNAKE_CASE_: Optional[Any] = special_adjustment.unsqueeze(1).expand(-1 , cos_dist.shape[1])
SCREAMING_SNAKE_CASE_: Any = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
SCREAMING_SNAKE_CASE_: Optional[int] = torch.any(concept_scores > 0 , dim=1)
return images, has_nsfw_concepts
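# A small sketch of the thresholding logic above, on toy scores: an image whose
# similarity to any "special care" embedding exceeds its threshold gets a 0.01
# penalty added to every concept score before the final NSFW test.
import torch

special_scores = torch.tensor([[0.02, -0.10]])        # one image, two special concepts
special_care = torch.any(special_scores > 0, dim=1)   # tensor([True])
special_adjustment = special_care * 0.01              # tensor([0.0100])
concept_scores = torch.tensor([[-0.005, -0.02]])      # cos_dist minus concept thresholds
has_nsfw = torch.any(concept_scores + special_adjustment.unsqueeze(1) > 0, dim=1)
print(has_nsfw)  # tensor([True]): the special-care hit pushes the first concept over 0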
| 13
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
lowercase__ =logging.get_logger(__name__)
lowercase__ ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase__ ={
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
lowercase__ ={
'junnyu/roformer_chinese_small': 1536,
'junnyu/roformer_chinese_base': 1536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
lowercase__ ={
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE : Optional[int] = RoFormerTokenizer
def __init__(self : List[str] , snake_case_ : Optional[int]=None , snake_case_ : str=None , snake_case_ : Optional[Any]=True , snake_case_ : str="[UNK]" , snake_case_ : Dict="[SEP]" , snake_case_ : Any="[PAD]" , snake_case_ : str="[CLS]" , snake_case_ : List[Any]="[MASK]" , snake_case_ : Any=True , snake_case_ : List[str]=None , **snake_case_ : Optional[int] , ):
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
__a : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
            pre_tok_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or pre_tok_state.get('''strip_accents''' , strip_accents ) != strip_accents
        ):
            __a : List[str] = getattr(normalizers , pre_tok_state.pop('''type''' ) )
            __a : Optional[Any] = do_lower_case
            __a : Optional[int] = strip_accents
            __a : List[str] = pre_tok_class(**pre_tok_state )
__a : Optional[Any] = do_lower_case
def __getstate__(self : Union[str, Any] ):
__a : Any = self.__dict__.copy()
__a : Union[str, Any] = BertPreTokenizer()
return state
def __setstate__(self : Tuple , snake_case_ : Optional[Any] ):
__a : Dict = d
__a : str = self.__dict__['''_tokenizer'''].get_vocab()
        __a : Optional[Any] = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )
def lowerCAmelCase (self : Optional[int] , snake_case_ : List[Any] , snake_case_ : Optional[Any]=None ):
__a : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase (self : Optional[int] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
__a : int = [self.sep_token_id]
__a : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase (self : int , snake_case_ : str , snake_case_ : Optional[str] = None ):
__a : Optional[Any] = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def lowerCAmelCase (self : Dict , snake_case_ : Dict , snake_case_ : Tuple=None , snake_case_ : Optional[Any]=None , snake_case_ : Union[str, Any]=False , **snake_case_ : Tuple , ):
__a : List[str] = BertPreTokenizer()
return super().save_pretrained(snake_case_ , snake_case_ , snake_case_ , snake_case_ , **snake_case_ )
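# A minimal usage sketch, assuming the junnyu/roformer_chinese_base checkpoint
# listed in the vocab map above is reachable (the class is exported as
# RoFormerTokenizerFast in transformers):
from transformers import RoFormerTokenizerFast

tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
encoding = tokenizer("今天天气非常好。")
print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))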
| 216
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
A : Optional[int] = {'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 276
|
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def __lowerCamelCase ( __a :str ) -> Optional[int]:
"""simple docstring"""
A__ = {}
A__ = job["""started_at"""]
A__ = job["""completed_at"""]
A__ = date_parser.parse(__a )
A__ = date_parser.parse(__a )
A__ = round((end_datetime - start_datetime).total_seconds() / 60.0 )
A__ = start
A__ = end
A__ = duration_in_min
return job_info
def __lowerCamelCase ( __a :Optional[Any] , __a :List[str]=None ) -> List[Any]:
"""simple docstring"""
A__ = None
if token is not None:
A__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'Bearer {token}'}
A__ = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
A__ = requests.get(__a , headers=__a ).json()
A__ = {}
try:
job_time.update({job["""name"""]: extract_time_from_single_job(__a ) for job in result["""jobs"""]} )
A__ = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(__a ):
A__ = requests.get(url + F'&page={i + 2}' , headers=__a ).json()
job_time.update({job["""name"""]: extract_time_from_single_job(__a ) for job in result["""jobs"""]} )
return job_time
except Exception:
print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' )
return {}
if __name__ == "__main__":
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
A : Dict = parser.parse_args()
A : List[Any] = get_job_time(args.workflow_run_id)
A : int = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'''{k}: {v["duration"]}''')
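# A quick sanity check of the duration arithmetic in extract_time_from_single_job,
# on a made-up job payload (the timestamps are hypothetical):
import dateutil.parser as date_parser

job = {"started_at": "2023-05-01T10:00:00Z", "completed_at": "2023-05-01T10:42:00Z"}
start = date_parser.parse(job["started_at"])
end = date_parser.parse(job["completed_at"])
print(round((end - start).total_seconds() / 60.0))  # 42 minutes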
| 276
| 1
|
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True ) -> List[Any]:
'''simple docstring'''
model.train()
    lowercase_ = model(input )
    lowercase_ = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase=False ) -> List[Any]:
'''simple docstring'''
set_seed(42 )
lowercase_ = RegressionModel()
lowercase_ = deepcopy(__lowerCAmelCase )
lowercase_ = RegressionDataset(length=80 )
    lowercase_ = DataLoader(dset , batch_size=16 )
model.to(accelerator.device )
if sched:
lowercase_ = AdamW(params=model.parameters() , lr=1E-3 )
lowercase_ = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        lowercase_ = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        lowercase_ = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
# Make a copy of `model`
if sched:
        lowercase_ , lowercase_ , lowercase_ , lowercase_ = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
else:
        lowercase_ , lowercase_ = accelerator.prepare(ddp_model , dataloader )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ = get_training_setup(__lowerCAmelCase )
# Use a single batch
    lowercase_ , lowercase_ = next(iter(dataloader ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowercase_ , lowercase_ = accelerator.gather((ddp_input, ddp_target) )
lowercase_ , lowercase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCAmelCase ):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
# Sync grads
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
        lowercase_ = ddp_input[torch.randperm(len(ddp_input ) )]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ = get_training_setup(__lowerCAmelCase )
# Use a single batch
    lowercase_ , lowercase_ = next(iter(dataloader ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowercase_ , lowercase_ = accelerator.gather((ddp_input, ddp_target) )
lowercase_ , lowercase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCAmelCase ):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
# Sync grads
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
        lowercase_ = ddp_input[torch.randperm(len(ddp_input ) )]
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase=False , __lowerCAmelCase=False ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = Accelerator(
split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowercase_ , lowercase_ , lowercase_ = get_training_setup(__lowerCAmelCase )
for iteration, batch in enumerate(__lowerCAmelCase ):
lowercase_ , lowercase_ = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase_ , lowercase_ = accelerator.gather((ddp_input, ddp_target) )
lowercase_ , lowercase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(__lowerCAmelCase ):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(__lowerCAmelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
        lowercase_ = ddp_input[torch.randperm(len(ddp_input ) )]
GradientState._reset_state()
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase=False , __lowerCAmelCase=False ) -> Optional[int]:
'''simple docstring'''
lowercase_ = Accelerator(
split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = get_training_setup(__lowerCAmelCase , __lowerCAmelCase )
for iteration, batch in enumerate(__lowerCAmelCase ):
lowercase_ , lowercase_ = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase_ , lowercase_ = accelerator.gather((ddp_input, ddp_target) )
lowercase_ , lowercase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__lowerCAmelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(__lowerCAmelCase ):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
lowercase_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__lowerCAmelCase ))
if accelerator.num_processes > 1:
check_model_parameters(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(13_37 + iteration )
GradientState._reset_state()
def _SCREAMING_SNAKE_CASE () -> Optional[Any]:
'''simple docstring'''
lowercase_ = Accelerator()
lowercase_ = RegressionDataset(length=80 )
    lowercase_ = DataLoader(first_dset , batch_size=16 )
lowercase_ = RegressionDataset(length=96 )
    lowercase_ = DataLoader(second_dset , batch_size=16 )
    lowercase_ , lowercase_ = accelerator.prepare(first_dataloader , second_dataloader )
assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(first_dataloader )
        if iteration < len(first_dataloader ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(second_dataloader )
                    if batch_num < len(second_dataloader ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _SCREAMING_SNAKE_CASE () -> List[str]:
'''simple docstring'''
lowercase_ = Accelerator()
lowercase_ = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
        test_noop_sync(accelerator )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
        test_distributed_sync(accelerator )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
                test_gradient_accumulation(split_batch , dispatch_batches )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch , dispatch_batches )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 136
|
"""simple docstring"""
from __future__ import annotations
import math
def _SCREAMING_SNAKE_CASE (a , b ) -> list:
    '''simple docstring'''
    if len(a ) != 2 or len(a[0] ) != 2 or len(b ) != 2 or len(b[0] ) != 2:
        raise Exception("""Matrices are not 2x2""" )
lowercase_ = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def _SCREAMING_SNAKE_CASE (matrix_a , matrix_b ) -> Union[str, Any]:
    '''simple docstring'''
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def _SCREAMING_SNAKE_CASE (matrix_a , matrix_b ) -> Union[str, Any]:
    '''simple docstring'''
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def _SCREAMING_SNAKE_CASE (a ) -> tuple[list, list, list, list]:
    '''simple docstring'''
    if len(a ) % 2 != 0 or len(a[0] ) % 2 != 0:
        raise Exception("""Odd matrices are not supported!""" )
    matrix_length = len(a )
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid , matrix_length )] for i in range(mid )]
    bot_right = [
        [a[i][j] for j in range(mid , matrix_length )] for i in range(mid , matrix_length )
    ]
    top_left = [[a[i][j] for j in range(mid )] for i in range(mid )]
    bot_left = [[a[i][j] for j in range(mid )] for i in range(mid , matrix_length )]
return top_left, top_right, bot_left, bot_right
def _SCREAMING_SNAKE_CASE (matrix ) -> tuple[int, int]:
    '''simple docstring'''
    return len(matrix ), len(matrix[0] )
def _SCREAMING_SNAKE_CASE (matrix ) -> None:
    '''simple docstring'''
    print("""\n""".join(str(line ) for line in matrix ) )
def _SCREAMING_SNAKE_CASE (matrix_a , matrix_b ) -> list:
    '''simple docstring'''
    if matrix_dimensions(matrix_a ) == (2, 2):
        return default_matrix_multiplication(matrix_a , matrix_b )
    a , b , c , d = split_matrix(matrix_a )
    e , f , g , h = split_matrix(matrix_b )
    t1 = actual_strassen(a , matrix_subtraction(f , h ) )
    t2 = actual_strassen(matrix_addition(a , b ) , h )
    t3 = actual_strassen(matrix_addition(c , d ) , e )
    t4 = actual_strassen(d , matrix_subtraction(g , e ) )
    t5 = actual_strassen(matrix_addition(a , d ) , matrix_addition(e , h ) )
    t6 = actual_strassen(matrix_subtraction(b , d ) , matrix_addition(g , h ) )
    t7 = actual_strassen(matrix_subtraction(a , c ) , matrix_addition(e , f ) )
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5 , t4 ) , t2 ) , t6 )
    top_right = matrix_addition(t1 , t2 )
    bot_left = matrix_addition(t3 , t4 )
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1 , t5 ) , t3 ) , t7 )
    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right ) ):
        new_matrix.append(top_left[i] + top_right[i] )
    for i in range(len(bot_right ) ):
        new_matrix.append(bot_left[i] + bot_right[i] )
    return new_matrix
def _SCREAMING_SNAKE_CASE (matrix1 , matrix2 ) -> list:
    '''simple docstring'''
    if matrix_dimensions(matrix1 )[1] != matrix_dimensions(matrix2 )[0]:
        msg = (
            """Unable to multiply these matrices, please check the dimensions.\n"""
            F'''Matrix A: {matrix1}\n'''
            F'''Matrix B: {matrix2}'''
        )
        raise Exception(msg )
    dimension1 = matrix_dimensions(matrix1 )
    dimension2 = matrix_dimensions(matrix2 )
    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]
    maxim = max(*dimension1 , *dimension2 )
    maxim = int(math.pow(2 , math.ceil(math.log2(maxim ) ) ) )
    new_matrix1 = matrix1
    new_matrix2 = matrix2
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0 , maxim ):
        if i < dimension1[0]:
            for _ in range(dimension1[1] , maxim ):
                new_matrix1[i].append(0 )
        else:
            new_matrix1.append([0] * maxim )
        if i < dimension2[0]:
            for _ in range(dimension2[1] , maxim ):
                new_matrix2[i].append(0 )
        else:
            new_matrix2.append([0] * maxim )
    final_matrix = actual_strassen(new_matrix1 , new_matrix2 )
    # Removing the additional zeros
    for i in range(0 , maxim ):
        if i < dimension1[0]:
            for _ in range(dimension2[1] , maxim ):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
UpperCAmelCase : List[Any] = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
UpperCAmelCase : Optional[int] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
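# A small self-check (a sketch): strassen should agree with a naive triple-loop
# product; a non-square example also exercises the zero-padding path. Note that
# strassen pads its inputs in place, so the expected value is computed first.
def naive_multiply(a, b):
    return [
        [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
        for i in range(len(a))
    ]

m1 = [[1, 2], [3, 4], [5, 6]]
m2 = [[7, 8, 9, 10], [11, 12, 13, 14]]
expected = naive_multiply(m1, m2)
assert strassen(m1, m2) == expected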
| 136
| 1
|
from argparse import ArgumentParser
from .env import EnvironmentCommand
def snake_case( ) -> Tuple:
'''simple docstring'''
lowercase : Union[str, Any] = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
lowercase : Dict = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
# Let's go
lowercase : Optional[Any] = parser.parse_args()
    if not hasattr(args , '''func''' ):
parser.print_help()
exit(1 )
# Run
    lowercase : int = args.func(args )
service.run()
if __name__ == "__main__":
main()
| 116
|
import math
def snake_case( positive_integer ) -> bool:
    '''simple docstring'''
    lowercase : Union[str, Any] = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
def snake_case( max_proportion = 1 / 1_23_45 ) -> int:
'''simple docstring'''
lowercase : Union[str, Any] = 0
lowercase : str = 0
lowercase : Optional[int] = 3
while True:
lowercase : Any = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            lowercase : Any = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''')
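# A quick check of check_partition_perfect's test: a value k passes when
# sqrt(4 * k + 1) / 2 + 1 / 2 is an exact power of two, e.g. 12 -> 4 -> log2 = 2.
import math

for k in (6, 12, 56):
    exponent = math.log2(math.sqrt(4 * k + 1) / 2 + 1 / 2)
    print(k, exponent == int(exponent))  # 6 False, 12 True, 56 True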
| 116
| 1
|
"""simple docstring"""
A = [
(1_000, '''M'''),
(900, '''CM'''),
(500, '''D'''),
(400, '''CD'''),
(100, '''C'''),
(90, '''XC'''),
(50, '''L'''),
(40, '''XL'''),
(10, '''X'''),
(9, '''IX'''),
(5, '''V'''),
(4, '''IV'''),
(1, '''I'''),
]
def __A ( roman :str) -> int:
    __a : str = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00}
    __a : List[Any] = 0
    __a : List[str] = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def __A ( number :int) -> str:
    __a : Union[str, Any] = []
    for arabic, roman in ROMAN:
        factor , number = divmod(number , arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
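# A worked example of the greedy encoder above: 3549 is consumed largest-first
# through the same ROMAN table, yielding M, M, M (3000), D (500), XL (40), IX (9).
number, out = 3549, []
for arabic, roman in ROMAN:
    factor, number = divmod(number, arabic)
    out.append(roman * factor)
    if number == 0:
        break
print("".join(out))  # MMMDXLIX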
| 160
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
A = logging.get_logger(__name__)
A = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
A = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
A = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ['''input_ids''', '''attention_mask''']
__lowerCAmelCase = RobertaTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="replace" , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=False , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(
_UpperCAmelCase , _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase , **_UpperCAmelCase , )
__a : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            __a : Tuple = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            __a : Dict = add_prefix_space
            __a : str = pre_tok_class(**pre_tok_state )
__a : Optional[int] = add_prefix_space
__a : str = '''post_processor'''
        __a : int = getattr(self.backend_tokenizer , tokenizer_component , None )
if tokenizer_component_instance:
__a : Dict = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__a : Optional[Any] = tuple(state['''sep'''] )
if "cls" in state:
__a : List[Any] = tuple(state['''cls'''] )
__a : Optional[Any] = False
if state.get('''add_prefix_space''' , _UpperCAmelCase ) != add_prefix_space:
__a : Any = add_prefix_space
__a : List[Any] = True
if state.get('''trim_offsets''' , _UpperCAmelCase ) != trim_offsets:
__a : List[Any] = trim_offsets
__a : List[str] = True
if changes_to_apply:
                __a : Any = getattr(processors , state.pop('''type''' ) )
                __a : Any = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
def _lowerCamelCase ( self ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def _lowerCamelCase ( self , _UpperCAmelCase ):
        __a : int = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
__a : Tuple = value
def _lowerCamelCase ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
        __a : Tuple = kwargs.get('''is_split_into_words''' , False )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
        __a : str = kwargs.get('''is_split_into_words''' , False )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_UpperCAmelCase , **_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__a : Optional[int] = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
__a : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__a : Dict = [self.sep_token_id]
__a : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
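# A minimal usage sketch, assuming the roberta-base checkpoint listed in the
# maps above is reachable (the class is exported as RobertaTokenizerFast):
from transformers import RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
enc = tokenizer("Hello world", "and a second sequence")
print(enc["input_ids"])  # <s> A </s></s> B </s>, mirroring the pair layout above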
| 160
| 1
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def A(__a: Tuple , __a: Union[str, Any] ):
lowerCAmelCase_ = checkpoint
lowerCAmelCase_ = {}
lowerCAmelCase_ = vae_state_dict["encoder.conv_in.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_in.bias"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_out.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_out.bias"]
lowerCAmelCase_ = vae_state_dict["encoder.norm_out.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.norm_out.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_in.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_in.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_out.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_out.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.norm_out.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.norm_out.bias"]
lowerCAmelCase_ = vae_state_dict["quant_conv.weight"]
lowerCAmelCase_ = vae_state_dict["quant_conv.bias"]
lowerCAmelCase_ = vae_state_dict["post_quant_conv.weight"]
lowerCAmelCase_ = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
lowerCAmelCase_ = {
layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(__a )
}
# Retrieves the keys for the decoder up blocks only
lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
lowerCAmelCase_ = {
layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(__a )
}
for i in range(__a ):
lowerCAmelCase_ = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key]
if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
lowerCAmelCase_ = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.weight" )
lowerCAmelCase_ = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.bias" )
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key]
lowerCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowerCAmelCase_ = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
lowerCAmelCase_ = renew_vae_attention_paths(__a )
lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
conv_attn_to_linear(__a )
for i in range(__a ):
lowerCAmelCase_ = num_up_blocks - 1 - i
lowerCAmelCase_ = [
key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key
]
if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
lowerCAmelCase_ = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.weight"
]
lowerCAmelCase_ = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.bias"
]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key]
lowerCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowerCAmelCase_ = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
lowerCAmelCase_ = renew_vae_attention_paths(__a )
lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
conv_attn_to_linear(__a )
return new_checkpoint
def A(__a: str , __a: str , ):
# Only support V1
    lowerCAmelCase_ = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
lowerCAmelCase_ = io.BytesIO(r.content )
    lowerCAmelCase_ = OmegaConf.load(io_obj )
lowerCAmelCase_ = 512
lowerCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
lowerCAmelCase_ = {}
with safe_open(__a , framework="pt" , device="cpu" ) as f:
for key in f.keys():
                lowerCAmelCase_ = f.get_tensor(key )
else:
lowerCAmelCase_ = torch.load(__a , map_location=__a )["state_dict"]
# Convert the VAE model.
lowerCAmelCase_ = create_vae_diffusers_config(__a , image_size=__a )
lowerCAmelCase_ = custom_convert_ldm_vae_checkpoint(__a , __a )
lowerCAmelCase_ = AutoencoderKL(**__a )
vae.load_state_dict(__a )
vae.save_pretrained(__a )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to store the converted diffusers VAE.''')
lowerCamelCase__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
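# One way to run the converter above from a shell (the script file name is an
# assumption; the two flags are the ones its parser defines, paths are placeholders):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.ckpt --dump_path ./vae-diffusers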
| 22
|
def A(arr: list ):
    lowerCAmelCase_ = len(arr )
    while cur > 1:
        # Find the maximum number in arr
        lowerCAmelCase_ = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        lowerCAmelCase_ = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse whole list
        lowerCAmelCase_ = arr[cur - 1 :: -1] + arr[cur : len(arr )]
cur -= 1
return arr
if __name__ == "__main__":
lowerCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCamelCase__ = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
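# A quick check of pancake_sort (the name used in the demo above): each pass
# flips the current maximum to the front, then flips it into its final slot.
assert pancake_sort([3, 1, 5, 2, 4]) == [1, 2, 3, 4, 5]
assert pancake_sort([]) == []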
| 22
| 1
|
"""simple docstring"""
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def _snake_case ( snake_case__ : Optional[Any] ):
A = SwinConfig()
A = swin_name.split('_' )
A = name_split[1]
A = int(name_split[4] )
A = int(name_split[3][-1] )
if model_size == "tiny":
A = 96
A = (2, 2, 6, 2)
A = (3, 6, 12, 24)
elif model_size == "small":
A = 96
A = (2, 2, 18, 2)
A = (3, 6, 12, 24)
elif model_size == "base":
A = 128
A = (2, 2, 18, 2)
A = (4, 8, 16, 32)
else:
A = 192
A = (2, 2, 18, 2)
A = (6, 12, 24, 48)
if "in22k" in swin_name:
A = 2_1841
else:
A = 1000
A = 'huggingface/label-files'
A = 'imagenet-1k-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
    A = {int(k ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
A = img_size
A = num_classes
A = embed_dim
A = depths
A = num_heads
A = window_size
return config
def _snake_case ( snake_case__ : List[str] ):
if "patch_embed.proj" in name:
A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
A = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
A = 'encoder.' + name
if "attn.proj" in name:
A = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
A = 'layernorm.weight'
if name == "norm.bias":
A = 'layernorm.bias'
if "head" in name:
A = name.replace('head' , 'classifier' )
else:
A = 'swin.' + name
return name
def _snake_case ( snake_case__ : List[str] , snake_case__ : Tuple ):
for key in orig_state_dict.copy().keys():
        A = orig_state_dict.pop(key )
if "mask" in key:
continue
elif "qkv" in key:
A = key.split('.' )
A = int(key_split[1] )
A = int(key_split[3] )
A = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A = val[:dim, :]
A = val[
dim : dim * 2, :
]
A = val[-dim:, :]
else:
A = val[
:dim
]
A = val[
dim : dim * 2
]
A = val[
-dim:
]
else:
A = val
return orig_state_dict
def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ):
    A = timm.create_model(swin_name , pretrained=True )
timm_model.eval()
A = get_swin_config(snake_case__ )
A = SwinForImageClassification(snake_case__ )
model.eval()
A = convert_state_dict(timm_model.state_dict() , snake_case__ )
model.load_state_dict(snake_case__ )
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
    A = Image.open(requests.get(url , stream=True ).raw )
A = image_processor(images=snake_case__ , return_tensors='pt' )
A = timm_model(inputs['pixel_values'] )
A = model(**snake_case__ ).logits
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case__ )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowercase = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 74
|
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : str , snake_case_ : str , snake_case_ : Path , snake_case_ : str = None , snake_case_ : str = None , snake_case_ : str = None , ) ->List[Any]:
if config_name_or_path is None:
lowerCamelCase__ : Dict ='facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
if generator_tokenizer_name_or_path is None:
lowerCamelCase__ : Optional[int] =generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
lowerCamelCase__ : Optional[int] =question_encoder_name_or_path
lowerCamelCase__ : Optional[Any] =RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
# Save model.
lowerCamelCase__ : Union[str, Any] =RagConfig.from_pretrained(snake_case_ )
lowerCamelCase__ : Optional[Any] =AutoConfig.from_pretrained(snake_case_ )
lowerCamelCase__ : Optional[Any] =AutoConfig.from_pretrained(snake_case_ )
lowerCamelCase__ : Optional[int] =gen_config
lowerCamelCase__ : str =question_encoder_config
lowerCamelCase__ : str =model_class.from_pretrained_question_encoder_generator(
snake_case_ , snake_case_ , config=snake_case_ )
rag_model.save_pretrained(snake_case_ )
# Sanity check.
model_class.from_pretrained(snake_case_ )
# Save tokenizers.
lowerCamelCase__ : str =AutoTokenizer.from_pretrained(snake_case_ )
gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/' )
lowerCamelCase__ : Optional[int] =AutoTokenizer.from_pretrained(snake_case_ )
question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
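# One way to invoke the consolidation script above (the file name is an
# assumption; the flags are the ones registered on its parser):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-ckpt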
| 126
| 0
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowercase : Union[str, Any] = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
lowercase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 366
|
def A_ ( A__ , A__ , A__ ) -> float:
if principal <= 0:
raise Exception('Principal borrowed must be > 0' )
if rate_per_annum < 0:
raise Exception('Rate of interest must be >= 0' )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
raise Exception('Years to repay must be an integer > 0' )
# Yearly rate is divided by 12 to get monthly rate
a__ : str = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
a__ : List[Any] = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
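# A worked example of the formula above (values are illustrative): 100,000
# borrowed at 10% per annum over 2 years means a monthly rate of 0.10 / 12
# across 24 payments.
principal, rate_per_month, number_of_payments = 100_000, 0.10 / 12, 2 * 12
emi = (
    principal
    * rate_per_month
    * (1 + rate_per_month) ** number_of_payments
    / ((1 + rate_per_month) ** number_of_payments - 1)
)
print(round(emi, 2))  # about 4614.49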
| 225
| 0
|
def a_ ( num ) -> bool:
"""simple docstring"""
if num < 0:
return False
snake_case__ = num
snake_case__ = 0
while num > 0:
snake_case__ = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 307
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask)
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True)["hidden_states"][0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True)["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
| 307
| 1
|
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = checkpoint["args"]
    state_dict = checkpoint["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu", num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=200, use_cache=True, decoder_start_token_id=2, early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--fairseq_path""", type=str, help="""Path to the fairseq model (.pt) file.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
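# Example invocation (added for illustration; file name and paths are hypothetical):
#
#     python convert_s2t_fairseq_to_tfms.py \
#         --fairseq_path /path/to/s2t_checkpoint.pt \
#         --pytorch_dump_folder_path ./s2t-converted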
| 298
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 298
| 1
|
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
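# For illustration (added, not part of the script): given the backend list
# ["torch"], a mixed-case name such as `UNet2DModel` is rendered through the
# DUMMY_CLASS template as:
#
#     class UNet2DModel(metaclass=DummyObject):
#         _backends = ["torch"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])
#
# while ALL_CAPS names go through DUMMY_CONSTANT and lowercase names through
# DUMMY_FUNCTION.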
| 81
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")
if __name__ == "__main__":
main()
| 264
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 64
|
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
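# Minimal usage sketch (added for illustration; the claim that the defaults
# correspond to google/efficientnet-b7 is an assumption based on the archive
# map above):
#
#     config = EfficientNetConfig()                # b7-style defaults
#     small_config = EfficientNetConfig(image_size=224)  # any field can be overridden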
| 64
| 1
|
"""simple docstring"""
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
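# Usage sketch (added for illustration): these exports are typically combined
# when declaring a dataset schema, e.g.
#
#     features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})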
| 238
|
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check the equality of ALL elements in an iterable."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Run the core process to find the problem solution."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first run of `n` consecutive integers with `n` distinct prime factors each."""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
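# Worked example (added for illustration): solution(2) returns 14, since
# 14 = 2 * 7 and 15 = 3 * 5 are the first two consecutive integers with two
# distinct prime factors each.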
| 238
| 1
|
"""simple docstring"""
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: float) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(principal: float, nominal_annual_percentage_rate: float, number_of_years: float) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
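# Usage sketch (added for illustration):
#
#     simple_interest(1000, 0.001, 30)    # 30.0
#     compound_interest(1000, 0.01, 12)   # ~126.83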
| 354
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_ddpm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 175
| 0
|
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
def __lowercase ( self : Any , SCREAMING_SNAKE_CASE : Dict=0 , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : str = dict(self.forward_default_kwargs )
UpperCamelCase__ : Optional[int] = kwargs.pop("num_inference_steps" , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = self.dummy_sample
UpperCamelCase__ : List[Any] = 0.1 * sample
UpperCamelCase__ : List[str] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ : List[str] = self.get_scheduler_config()
UpperCamelCase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
UpperCamelCase__ : List[Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
UpperCamelCase__ : Optional[int] = dummy_past_residuals[:]
UpperCamelCase__ : int = scheduler.step_prk(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase__ : Dict = new_scheduler.step_prk(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
UpperCamelCase__ : int = scheduler.step_plms(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase__ : str = new_scheduler.step_plms(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __lowercase ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
UpperCamelCase__ : str = self.scheduler_classes[0]
UpperCamelCase__ : Tuple = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = 10
UpperCamelCase__ : Optional[Any] = self.dummy_model()
UpperCamelCase__ : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.prk_timesteps ):
UpperCamelCase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = scheduler.step_prk(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
UpperCamelCase__ : Tuple = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = scheduler.step_plms(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
return sample
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Dict = dict(self.forward_default_kwargs )
UpperCamelCase__ : Optional[int] = kwargs.pop("num_inference_steps" , SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ : Any = self.get_scheduler_config()
UpperCamelCase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[str] = self.dummy_sample
UpperCamelCase__ : Optional[int] = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE , "set_timesteps" ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE , "set_timesteps" ):
UpperCamelCase__ : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
UpperCamelCase__ : List[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
UpperCamelCase__ : List[str] = dummy_past_residuals[:]
UpperCamelCase__ : Tuple = scheduler.step_prk(SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase__ : Optional[int] = scheduler.step_prk(SCREAMING_SNAKE_CASE , 1 , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
UpperCamelCase__ : Any = scheduler.step_plms(SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase__ : str = scheduler.step_plms(SCREAMING_SNAKE_CASE , 1 , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def __lowercase ( self : Dict ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = self.scheduler_classes[0]
UpperCamelCase__ : List[str] = self.get_scheduler_config(steps_offset=1 )
UpperCamelCase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] , [0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE , beta_end=SCREAMING_SNAKE_CASE )
def __lowercase ( self : Dict ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE )
def __lowercase ( self : Any ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE )
def __lowercase ( self : Tuple ):
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE )
def __lowercase ( self : List[str] ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE )
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = 27
for scheduler_class in self.scheduler_classes:
UpperCamelCase__ : Union[str, Any] = self.dummy_sample
UpperCamelCase__ : List[str] = 0.1 * sample
UpperCamelCase__ : Union[str, Any] = self.get_scheduler_config()
UpperCamelCase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
UpperCamelCase__ : List[Any] = scheduler.step_prk(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
with self.assertRaises(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : List[str] = self.scheduler_classes[0]
UpperCamelCase__ : List[Any] = self.get_scheduler_config()
UpperCamelCase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = self.full_loop()
UpperCamelCase__ : Any = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ : Tuple = self.full_loop(prediction_type="v_prediction" )
UpperCamelCase__ : List[str] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Tuple = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE , beta_start=0.0_1 )
UpperCamelCase__ : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def __lowercase ( self : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : int = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE , beta_start=0.0_1 )
UpperCamelCase__ : Optional[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
def euclidean_gcd(a: int, b: int) -> int:
    """Compute the greatest common divisor with the iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive implementation of the Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
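# Sanity check (a minimal sketch, not part of the original script): both variants
# should agree with the standard library for non-negative inputs, e.g.
#
#   from math import gcd
#   assert euclidean_gcd(48, 18) == euclidean_gcd_recursive(48, 18) == gcd(48, 18) == 6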
def main() -> None:
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]  # strip the trailing "weight"
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
    args = parser.parse_args()
main(args)
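# Example invocation (script name and paths are illustrative, not from the original file):
#   python bertarize.py --pruning_method topK --threshold 0.10 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model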
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
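# Minimal usage sketch (values are illustrative):
#   config = InformerConfig(prediction_length=24)
#   config.context_length  # falls back to prediction_length (24)
#   config.feature_size    # input_size * len(lags_sequence) + _number_of_features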
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests(self):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
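# Typical usage sketch (config class and kwargs are illustrative): a model's test
# file wraps its config in the tester and runs the shared checks:
#   tester = ConfigTester(self, config_class=MyModelConfig, hidden_size=37)
#   tester.run_common_tests()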
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below `max_number` using a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count composite integers below `max_number` with exactly two prime factors."""
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
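# e.g. solution(30) == 10: the qualifying numbers are 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26.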
if __name__ == "__main__":
print(f'''{solution() = }''')
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings():
    html_string_1 = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"
    html_string_2 = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "

    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
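# Usage sketch (sizes are illustrative): the defaults above roughly match the
# "deepmind/language-perceiver" checkpoint, so a reduced variant can be built with:
#   config = PerceiverConfig(num_latents=64, d_latents=256)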
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask: zero out the top-left quadrant
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """Return True if the string consists of four dot-separated octets in range."""
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= int(octet) <= 254 for octet in octets)
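# Examples (sketch, not part of the original script):
#   is_ip_v4_address_valid("192.168.0.23")  -> True
#   is_ip_v4_address_valid("192.256.15.8")  -> False (256 is out of range)
#   is_ip_v4_address_valid("not.a.real.ip") -> False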
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowerCAmelCase )
            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]' )
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Quadratic reference implementation: scan ahead for each element."""
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like the slow version, but iterates over a slice instead of indexing."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Monotonic-stack implementation: each element is pushed and popped at most once."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
'from __main__ import arr, next_greatest_element_slow, '
'next_greatest_element_fast, next_greatest_element'
)
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
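# Usage sketch (values are illustrative): XLMRobertaConfig() reproduces the
# "xlm-roberta-base" defaults; a smaller test variant could be
#   config = XLMRobertaConfig(num_hidden_layers=2, hidden_size=128, num_attention_heads=4)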
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: "torch.Tensor" ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
_SCREAMING_SNAKE_CASE = box.int().tolist()
_SCREAMING_SNAKE_CASE = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
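

# Usage sketch (not part of the original module): "facebook/detr-resnet-50" and
# the COCO sample URL are illustrative assumptions; the output fields follow
# postprocess() above.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9
    )
    for prediction in predictions:
        # e.g. {"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}
        print(prediction)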
| 306
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
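

# Usage sketch (assumption: this mirrors the standard image-processor call
# pattern; the shapes below are illustrative, not taken from this file).
if __name__ == "__main__":
    # A "video" is a list of frames; here 8 random HWC uint8 frames.
    dummy_video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
    processor = VivitImageProcessor()
    encoding = processor(dummy_video, return_tensors="np")
    print(encoding["pixel_values"].shape)  # expected: (1, 8, 3, 224, 224)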
| 76
| 0
|
"""simple docstring"""
import enum
import shutil
import sys
lowercase__ , lowercase__ = shutil.get_terminal_size()
lowercase__ = {"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class __lowerCamelCase ( enum.Enum ):
'''simple docstring'''
a_ : int = 0
a_ : Union[str, Any] = 1
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase="" ) -> int:
"""simple docstring"""
sys.stdout.write(str(__UpperCamelCase ) + end )
sys.stdout.flush()
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="" ) -> Tuple:
"""simple docstring"""
forceWrite(f'''\u001b[{color}m{content}\u001b[0m''' , __UpperCamelCase )
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
forceWrite("\r" )
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> Any:
"""simple docstring"""
forceWrite(f'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''' )
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
forceWrite(" " * TERMINAL_WIDTH )
reset_cursor()
def __lowerCamelCase ( ) -> List[str]:
"""simple docstring"""
reset_cursor()
forceWrite("-" * TERMINAL_WIDTH )
| 161
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 161
| 1
|
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """
    >>> digits_fifth_powers_sum(1234)
    1300
    """
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Return the sum of all numbers that equal the sum of the fifth powers of their digits."""
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
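# Worked check: digits_fifth_powers_sum(4150) == 4**5 + 1**5 + 5**5 + 0**5
# == 1024 + 1 + 3125 + 0 == 4150, so 4150 is counted by solution(). The total,
# 443839, is the well-known Project Euler 30 answer (quoted here as a
# cross-check, not derived from this file).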
| 283
|
import argparse
JS_PATH = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the stable version and the version dropdown in the docs' custom.js."""
    with open(JS_PATH, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(JS_PATH, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
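# For reference, the region of custom.js this script rewrites looks roughly like
# the following (a sketch; the exact file contents are an assumption):
#
#   const stableVersion = "v4.28.0"
#   const versionMapping = {
#       "": "v4.28.0 (stable)",
#       "v4.27.0": "v4.27.0",
#   }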
| 283
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 105
|
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 105
| 1
|
def hamming_distance(string1: str, string2: str) -> int:
    """Return the number of positions at which two equal-length strings differ.

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_ctx=2048,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
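

# Usage sketch (the values printed are the defaults defined above; the ONNX
# config call pattern is an assumption based on the standard OnnxConfig workflow).
if __name__ == "__main__":
    config = CodeGenConfig()
    print(config.n_embd, config.n_layer, config.n_head)  # 4096 28 16
    onnx_config = CodeGenOnnxConfig(config)
    print(list(onnx_config.inputs.keys()))  # ['input_ids', 'attention_mask']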
| 336
| 1
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
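

# Usage sketch (assumptions: "google/ddpm-ema-celebahq-256" is a compatible
# unconditional checkpoint and "face.png" exists locally; treat this as
# illustrative, not a tested recipe).
if __name__ == "__main__":
    from diffusers import DiffusionPipeline

    base = DiffusionPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
    pipe = DDIMNoiseComparativeAnalysisPipeline(base.unet, base.scheduler)
    init_image = PIL.Image.open("face.png")
    images, timestep = pipe(init_image, strength=0.5, return_dict=False)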
| 369
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 96
| 0
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 207
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
"""configuration_audio_spectrogram_transformer""": [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ASTConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ASTForAudioClassification""",
"""ASTModel""",
"""ASTPreTrainedModel""",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["""ASTFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 92
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    """Construct a PEGASUS tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        """Just EOS."""
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is special, else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
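

# Usage sketch (assumption: "google/pegasus-xsum" hosts the spiece.model listed
# above; decoded text round-trips modulo sentencepiece normalization).
if __name__ == "__main__":
    tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
    batch = tokenizer(["PEGASUS was pre-trained with gap-sentence generation."])
    print(batch["input_ids"])
    print(tokenizer.decode(batch["input_ids"][0]))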
| 359
|
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    @slow
    @require_torch_gpu
    def test_model_download(self):
        """Warm up the cache so the timed test below does not include download time."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)

    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:

        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)

        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)

        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1


class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu

            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
| 122
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict =StableDiffusionInstructPixaPixPipeline
SCREAMING_SNAKE_CASE_ : Tuple =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
SCREAMING_SNAKE_CASE_ : List[Any] =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ : Tuple =IMAGE_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowerCamelCase ( self : List[Any] ):
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
__UpperCamelCase = PNDMScheduler(skip_prk_steps=__A )
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__UpperCamelCase = CLIPTextModel(__A )
__UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _lowerCamelCase ( self : str , __A : Dict , __A : Any=0 ):
__UpperCamelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__A ) ).to(__A )
__UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCamelCase = Image.fromarray(np.uinta(__A ) ).convert('RGB' )
if str(__A ).startswith('mps' ):
__UpperCamelCase = torch.manual_seed(__A )
else:
__UpperCamelCase = torch.Generator(device=__A ).manual_seed(__A )
__UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def _lowerCamelCase ( self : Dict ):
__UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = StableDiffusionInstructPixaPixPipeline(**__A )
__UpperCamelCase = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
__UpperCamelCase = self.get_dummy_inputs(__A )
__UpperCamelCase = sd_pipe(**__A ).images
__UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__UpperCamelCase = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowerCamelCase ( self : Optional[int] ):
__UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = StableDiffusionInstructPixaPixPipeline(**__A )
__UpperCamelCase = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
__UpperCamelCase = self.get_dummy_inputs(__A )
__UpperCamelCase = 'french fries'
__UpperCamelCase = sd_pipe(**__A , negative_prompt=__A )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__UpperCamelCase = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowerCamelCase ( self : List[str] ):
__UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = StableDiffusionInstructPixaPixPipeline(**__A )
__UpperCamelCase = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
__UpperCamelCase = self.get_dummy_inputs(__A )
__UpperCamelCase = [inputs['prompt']] * 2
__UpperCamelCase = np.array(inputs['image'] ).astype(np.floataa ) / 255.0
__UpperCamelCase = torch.from_numpy(__A ).unsqueeze(0 ).to(__A )
__UpperCamelCase = image / 2 + 0.5
__UpperCamelCase = image.permute(0 , 3 , 1 , 2 )
__UpperCamelCase = image.repeat(2 , 1 , 1 , 1 )
__UpperCamelCase = sd_pipe(**__A ).images
__UpperCamelCase = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
__UpperCamelCase = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowerCamelCase ( self : List[Any] ):
__UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' )
__UpperCamelCase = StableDiffusionInstructPixaPixPipeline(**__A )
__UpperCamelCase = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
__UpperCamelCase = self.get_dummy_inputs(__A )
__UpperCamelCase = sd_pipe(**__A ).images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [round(__A , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(__A ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
__UpperCamelCase = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowerCamelCase ( self : Any ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _lowerCamelCase ( self : str ):
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = StableDiffusionInstructPixaPixPipeline(**__A )
__UpperCamelCase = VaeImageProcessor(do_resize=__A , do_normalize=__A )
__UpperCamelCase = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
__UpperCamelCase = pipe(**self.get_dummy_inputs_by_type(__A , input_image_type='pt' ) )[0]
__UpperCamelCase = components['vae']
__UpperCamelCase = self.get_dummy_inputs_by_type(__A , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__UpperCamelCase = vae.encode(inputs[image_param] ).latent_dist.mode()
__UpperCamelCase = pipe(**__A )[0]
__UpperCamelCase = np.abs(out - out_latents_inputs ).max()
self.assertLess(__A , 1e-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def _lowerCamelCase ( self : Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : int , __A : int=0 ):
__UpperCamelCase = torch.manual_seed(__A )
__UpperCamelCase = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
__UpperCamelCase = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
__UpperCamelCase = self.get_inputs()
__UpperCamelCase = pipe(**__A ).images
__UpperCamelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__UpperCamelCase = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id, safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
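# For reference, a minimal sketch of driving this pipeline outside the test
# harness (the input image path and prompt are illustrative assumptions, not
# part of the test suite above):
#
#   import PIL.Image
#   import torch
#   from diffusers import StableDiffusionInstructPix2PixPipeline
#
#   pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
#       "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
#   ).to("cuda")
#   image = PIL.Image.open("example.png").convert("RGB")  # hypothetical input file
#   edited = pipe("make the sky stormy", image=image, num_inference_steps=20).images[0]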
| 53
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
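# A minimal usage sketch (the quantization flags are the defaults documented in
# the signature above; the save directory is a hypothetical example):
#
#   config = IBertConfig(quant_mode=True, force_dequant="gelu")
#   config.save_pretrained("./ibert-config")  # hypothetical output directory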
| 152
| 0
|
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr):
        # Build the tree from an existing array in O(n).
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        # Recover the original array in O(n) by undoing the build step.
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        # Add the lowest set bit: parent in the update direction.
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        # Remove the lowest set bit: parent in the query direction.
        return index - (index & (-index))

    def add(self, index, value):
        # Add `value` at `index` in O(log n).
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        # Set the value at `index` to `value` in O(log n).
        self.add(index, value - self.get(index))

    def prefix(self, right):
        # Sum of the half-open prefix [0, right) in O(log n).
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        # Sum over [left, right) in O(log n).
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        return self.query(index, index + 1)

    def rank_query(self, value):
        # Largest index i with prefix(i) <= value, via binary lifting in O(log n).
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
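# A short usage sketch (values chosen for illustration only):
#
#   f = FenwickTree(arr=[1, 2, 3, 4, 5])
#   f.prefix(3)        # 1 + 2 + 3 == 6 (half-open prefix [0, 3))
#   f.add(2, 10)       # underlying array becomes [1, 2, 13, 4, 5]
#   f.query(1, 4)      # 2 + 13 + 4 == 19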
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357
|
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient (v* A v) / (v* v) of a Hermitian matrix."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 304
| 0
|
import re
def dna(dna: str) -> str:
    """Returns the complementary strand of a DNA sequence."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
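# Example (illustrative): the complement pairs G<->C and A<->T, so:
#
#   dna("GCTA")   # -> "CGAT"
#   dna("ATGC")   # -> "TACG"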
if __name__ == "__main__":
import doctest
doctest.testmod()
| 82
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 82
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
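# A minimal construction sketch (defaults as declared above; the attribute_map
# means e.g. `config.hidden_size` resolves to `config.embed_dim`):
#
#   config = Swin2SRConfig(upscale=4)
#   assert config.hidden_size == config.embed_dim == 180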
| 362
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 282
| 0
|
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Implementation of the simulated annealing optimization algorithm."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
| 211
|
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
| 6
| 0
|
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    """
    Checks whether string `a` can be abbreviated to string `b` by capitalizing
    some of its lowercase letters and deleting the remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
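# Worked example (illustrative): "daBcd" matches "ABC" because we can capitalize
# 'a' and 'c' and drop the remaining lowercase 'd's:
#
#   abbr("daBcd", "ABC")   # True
#   abbr("dBcd", "ABC")    # False (no 'a' available to capitalize before 'B')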
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351
|
"""simple docstring"""
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    """Converts a length value between metric scales (e.g. meter to kilometer)."""
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
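# Example conversions (illustrative):
#
#   length_conversion(4, "kilometer", "megametre")   # 0.004
#   length_conversion(1, "meter", "km")              # 0.001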
if __name__ == "__main__":
from doctest import testmod
testmod()
| 291
| 0
|
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: returns the maximum value that fits within max_weight."""
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item, if it fits.
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
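# Example (illustrative): with weights [1, 2, 4, 5] and values [5, 4, 8, 6],
# a capacity of 5 is best filled by items 0 and 2 (weight 1 + 4, value 5 + 8):
#
#   knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)   # 13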
if __name__ == "__main__":
import doctest
doctest.testmod()
| 143
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> int:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> str:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class UpperCAmelCase__(metaclass=DummyObject):
    _backends = ["flax"]
def __init__( self , *lowercase , **lowercase ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def __lowerCamelCase ( cls , *lowercase , **lowercase ) -> List[str]:
requires_backends(cls , ["""flax"""] )
| 349
| 0
|
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
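# Quick check (illustrative): dp[i][j] records whether the first i chars of `a`
# can produce the first j chars of `b` by capitalizing/deleting lowercase chars:
#
#   abbr("daBcd", "ABC")   # True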
if __name__ == "__main__":
import doctest
doctest.testmod()
| 47
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 47
| 1
|
"""simple docstring"""
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_MAP = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
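# A minimal sketch (defaults as declared above; note the decoder side is wider
# than the encoder side in this configuration):
#
#   config = BertAbsConfig(dec_layers=8)
#   assert config.enc_hidden_size == 512 and config.dec_hidden_size == 768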
| 105
|
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        batch_size=1,
        num_inference_steps=100,
        eta=0.0,
        generator=None,
        output_type="pil",
        return_dict=True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
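# A minimal usage sketch (the checkpoint id and input file are illustrative
# assumptions, not part of this module):
#
#   import PIL.Image
#   from diffusers import LDMSuperResolutionPipeline
#
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   low_res = PIL.Image.open("low_res.png").convert("RGB")  # hypothetical file
#   upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]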
| 105
| 1
|
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    # This section tests a variety of data types on the linked list.
    test_input = [
        -9,
        100,
        Node(77_345_112),
        "dlrow olleH",
        7,
        5_555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    """Interactive demo of the LinkedList operations."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Coerce a single image, a list of frames, or a batch of videos into List[List[ImageInput]]."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
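
# Hedged illustration (not part of the original file): make_batched normalizes
# every accepted input layout to a batch of frame lists:
#   single frame          -> [[frame]]
#   list of frames        -> [list_of_frames]
#   batch of frame lists  -> returned unchanged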
class VideoImageProcessor(BaseImageProcessor):
    # NOTE: the original class name was lost to obfuscation; "VideoImageProcessor" is a
    # reconstructed placeholder for this ViViT-style video processor.
    r"""Constructs a video image processor that can resize, center-crop, rescale (with an
    optional offset) and normalize each frame of a video."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Optionally shift pixel values before rescaling (behavior preserved from the source).
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample,
                    do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale,
                    rescale_factor=rescale_factor, offset=offset, do_normalize=do_normalize,
                    image_mean=image_mean, image_std=image_std, data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
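
# Hedged usage sketch (not part of the original file; `VideoImageProcessor` is the
# placeholder class name chosen above): preprocess one 8-frame dummy video.
if __name__ == "__main__":
    frames = [np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8) for _ in range(8)]
    processor = VideoImageProcessor()
    batch = processor.preprocess(frames, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 8, 3, 224, 224)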
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """Output of `UnCLIPScheduler.step`: the previous sample x_{t-1} and the predicted denoised sample x_0."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_bar function (cumulative product of 1 - beta)."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
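
# Hedged sanity check (not part of the original file): for the default cosine
# schedule, every beta lies in (0, max_beta], so the implied alphas_cumprod
# decays monotonically from ~1 toward 0 as t grows. For example:
#   betas = betas_for_alpha_bar(1000)
#   assert 0 < betas.min() and betas.max() <= 0.999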
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: float = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ) -> None:
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # UnCLIP applies no input scaling; this hook exists for interchangeability with other schedulers.
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> None:
        """Evenly space `num_inference_steps` timesteps across the trained range, last to first."""
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance  # _get_variance already returns a standard deviation in this mode
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
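
# Hedged usage sketch (not part of the original file): one denoising step with
# dummy tensors standing in for a UNet's epsilon prediction.
if __name__ == "__main__":
    scheduler = UnCLIPScheduler()
    scheduler.set_timesteps(25)
    sample = torch.randn(1, 3, 64, 64)
    model_output = torch.randn(1, 3, 64, 64)  # placeholder for the model's noise prediction
    t = int(scheduler.timesteps[0])
    out = scheduler.step(model_output, t, sample, generator=torch.Generator().manual_seed(0))
    print(out.prev_sample.shape)  # torch.Size([1, 3, 64, 64])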
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
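
# Hedged note (not part of the original file): these integration tests are gated
# by @nightly and need a CUDA-enabled onnxruntime build. Assuming the usual
# diffusers test environment variables, something like
#   RUN_NIGHTLY=1 python -m pytest -k "inpaint" path/to/this_test_file.py
# should select and run them.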
def binary_xor(a: int, b: int) -> str:
    """
    Take in two integers, convert them to binary and return a binary string
    that is the result of a binary xor operation on them.

    >>> binary_xor(25, 32)
    '0b111001'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
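
# Hedged sanity check (not part of the original file): up to leading-zero
# padding, binary_xor agrees with Python's built-in ^ operator.
assert int(binary_xor(37, 50), 2) == (37 ^ 50)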
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """This violates the markdown spec, but this is what pegasus was trained on."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))