code stringlengths 87 55.2k | code_codestyle int64 0 349 | style_context stringlengths 135 49.1k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the (empty) map of pretrained config archives.
# NOTE(review): both values are bound to the same scrambled name `a`;
# upstream these are `logger` and `LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP` — confirm.
a : str = logging.get_logger(__name__)
a : str = {}
class UpperCamelCase_ ( lowercase_ ):
    """LLaMA-style model configuration (identifiers machine-scrambled).

    NOTE(review): every positional parameter of ``__init__`` is named ``A``
    (duplicate argument names are a SyntaxError) and the body reads the
    un-scrambled names (``vocab_size``, ``hidden_size``, ...), which are not
    defined in this scope. Likewise all locals are discarded into
    ``UpperCAmelCase`` while validation reads ``rope_scaling_type`` /
    ``rope_scaling_factor``. Confirm against the upstream
    ``LlamaConfig`` before relying on this block.
    """

    # model type tag and keys ignored at inference time (both bound to the
    # same scrambled name `lowercase`)
    lowercase = '''llama'''
    lowercase = ['''past_key_values''']

    def __init__( self , A=32000 , A=4096 , A=11008 , A=32 , A=32 , A=None , A="silu" , A=2048 , A=0.0_2 , A=1e-6 , A=True , A=0 , A=1 , A=2 , A=1 , A=False , A=None , **A , ) -> None:
        UpperCAmelCase : int = vocab_size
        UpperCAmelCase : Optional[Any] = max_position_embeddings
        UpperCAmelCase : str = hidden_size
        UpperCAmelCase : int = intermediate_size
        UpperCAmelCase : List[Any] = num_hidden_layers
        UpperCAmelCase : Any = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            UpperCAmelCase : List[Any] = num_attention_heads
        UpperCAmelCase : Tuple = num_key_value_heads
        UpperCAmelCase : List[str] = hidden_act
        UpperCAmelCase : Tuple = initializer_range
        UpperCAmelCase : Any = rms_norm_eps
        UpperCAmelCase : List[str] = pretraining_tp
        UpperCAmelCase : Union[str, Any] = use_cache
        UpperCAmelCase : Optional[int] = rope_scaling
        # validate the rope_scaling dict before handing off to the base class
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=A , bos_token_id=A , eos_token_id=A , tie_word_embeddings=A , **A , )

    def _lowercase( self ) -> None:
        """Validate that ``self.rope_scaling`` is either None or a two-field
        ``{"type": ..., "factor": ...}`` dict with a known type and a float
        factor strictly greater than 1."""
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , A ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
                f'''got {self.rope_scaling}''' )
        UpperCAmelCase : Dict = self.rope_scaling.get("""type""" , A )
        UpperCAmelCase : Optional[int] = self.rope_scaling.get("""factor""" , A )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(A , A ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
| 265 |
import numpy as np


def sigmoid(vector):
    """Return the element-wise logistic sigmoid 1 / (1 + e^-x) of *vector*.

    Fix: both functions in this script were named ``__lowerCamelCase``, so the
    second shadowed the first and then called the undefined name ``sigmoid``;
    the first function is renamed to match its call site.
    """
    return 1 / (1 + np.exp(-vector))


def __lowerCamelCase(vector):
    """Return the SiLU / swish activation x * sigmoid(x) of *vector*."""
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 285 | 0 |
import os
import sys
import unittest
# Resolve the repository root (three directory levels above this test file).
# NOTE(review): the lines below read `git_repo_path`, but the value is bound
# to the scrambled name `A` — confirm against the upstream test module.
A : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
A : Dict = os.path.join(git_repo_path, "src", "diffusers")
class _lowercase ( unittest.TestCase):
    """Unit tests for the `check_dummies` utility script.

    NOTE(review): all four test methods share the scrambled name
    ``lowerCAmelCase`` (later defs shadow earlier ones), locals are bound to
    ``lowerCamelCase__`` while assertions read ``__lowerCamelCase`` /
    ``objects`` / ``dummy_files`` — confirm against the upstream test file.
    """

    def lowerCAmelCase ( self : List[Any] ):
        '''`find_backend` extracts the backend name(s) from an
        `if not is_xxx_available():` guard line, joining multiple backends
        with `_and_`.'''
        lowerCamelCase__ : int = find_backend(" if not is_torch_available():" )
        self.assertEqual(__lowerCamelCase , "torch" )
        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        lowerCamelCase__ : str = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
        self.assertEqual(__lowerCamelCase , "torch_and_transformers" )
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        lowerCamelCase__ : Dict = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
        self.assertEqual(__lowerCamelCase , "torch_and_transformers_and_onnx" )

    def lowerCAmelCase ( self : int ):
        '''`read_init` returns a mapping backend -> dummy object names.'''
        lowerCamelCase__ : List[Any] = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch" , __lowerCamelCase )
        self.assertIn("torch_and_transformers" , __lowerCamelCase )
        self.assertIn("flax_and_transformers" , __lowerCamelCase )
        self.assertIn("torch_and_transformers_and_onnx" , __lowerCamelCase )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel" , objects["torch"] )
        self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
        self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
        self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
        self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
        self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )

    def lowerCAmelCase ( self : Optional[int] ):
        '''`create_dummy_object` emits a placeholder constant, function or
        class that raises when its backend is missing.'''
        lowerCamelCase__ : Union[str, Any] = create_dummy_object("CONSTANT" , "\'torch\'" )
        self.assertEqual(__lowerCamelCase , "\nCONSTANT = None\n" )
        lowerCamelCase__ : List[str] = create_dummy_object("function" , "\'torch\'" )
        self.assertEqual(
            __lowerCamelCase , "\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n" )
        lowerCamelCase__ : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = \'torch\'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, \'torch\')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, \'torch\')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, \'torch\')\n"
        lowerCamelCase__ : List[str] = create_dummy_object("FakeClass" , "\'torch\'" )
        self.assertEqual(__lowerCamelCase , __lowerCamelCase )

    def lowerCAmelCase ( self : int ):
        '''`create_dummy_files` generates a whole dummy module per backend.'''
        lowerCamelCase__ : Dict = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        lowerCamelCase__ : List[str] = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
        self.assertEqual(dummy_files["torch"] , __lowerCamelCase )
| 184 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
return "".join(sorted(UpperCamelCase__ ) )
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
return word_by_signature[signature(UpperCamelCase__ )]
_UpperCAmelCase : str = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""")
_UpperCAmelCase : Dict = sorted({word.strip().lower() for word in data.splitlines()})
_UpperCAmelCase : List[str] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
_UpperCAmelCase : Dict = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("""anagrams.txt""", """w""") as file:
file.write("""all_anagrams = \n """)
file.write(pprint.pformat(all_anagrams))
| 285 | 0 |
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape Amazon.in search results for *product* into a DataFrame.

    Fix: the function was defined under the scrambled name ``__lowerCAmelCase``
    but invoked below as ``get_amazon_product_data``; the body read the
    undefined names ``product``, ``url`` etc. ``item.ha`` is restored to
    ``item.h2`` (scrambled digit).

    NOTE(review): this module imports ``BeautifulSoup`` from ``bsa`` — almost
    certainly a typo for ``bs4``; confirm the import line at the top of the
    file.

    Args:
        product: search term used in the Amazon.in query URL.

    Returns:
        DataFrame with one row per search result: title, link, current
        price, rating, MRP and the discount percentage.
    """
    url = f"""https://www.amazon.in/laptop/s?k={product}"""
    # Spoof a desktop browser so Amazon serves the regular HTML page.
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                # Discount as a percentage of MRP.
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        # NOTE(review): the next four statements were fully scrambled in the
        # original (the row list was discarded and two bare `= " "` lines
        # remained); reconstructed from the surviving names — confirm against
        # upstream.
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "MRP of the product",
        ] = " "
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "Current Price of the product",
        ] = " "
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = """headphones"""
    get_amazon_product_data(product).to_csv(F'''Amazon Product Data for {product}.csv''')
| 234 |
from __future__ import annotations
import numpy as np
def __lowerCamelCase(table):
    """Doolittle LU decomposition of a square matrix.

    Factors *table* into a unit-lower-triangular matrix ``lower`` and an
    upper-triangular matrix ``upper`` such that ``lower @ upper == table``.

    Fix: every loop bound and index variable had been scrambled into the
    single name ``UpperCamelCase__`` (the matrix parameter), making e.g.
    ``range(table)`` a TypeError; the bounds are restored per the Doolittle
    algorithm.

    Args:
        table: square 2-D array-like of numbers.

    Returns:
        Tuple ``(lower, upper)`` of ``numpy`` float arrays.

    Raises:
        ValueError: if *table* is not square.
        ArithmeticError: if a zero pivot is encountered (no LU decomposition
            exists without row pivoting).
    """
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f'''{rows}x{columns} array:\n{table}'''
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        # Entries of `lower` left of the diagonal (row i, columns j < i).
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError('No LU decomposition exists')
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        # Entries of `upper` on/right of the diagonal (row i, columns j >= i).
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 285 | 0 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
# Warning banner inserted above converted lines that mention TFDS features
# which need manual attention, and the matching closing marker.
# NOTE(review): all four module constants are bound to the same scrambled
# name `lowercase_`; the command class below reads HIGHLIGHT_MESSAGE_PRE,
# HIGHLIGHT_MESSAGE_POST, TO_HIGHLIGHT and TO_CONVERT — confirm upstream.
lowercase_ = """<<<<<<< This should probably be modified because it mentions: """
lowercase_ = """=======
>>>>>>>
"""
# Expressions whose presence flags a line for manual review.
lowercase_ = [
    """TextEncoderConfig""",
    """ByteTextEncoder""",
    """SubwordTextEncoder""",
    """encoder_config""",
    """maybe_build_from_corpus""",
    """manual_dir""",
]
# Regex rewrites applied to every line of a tfds script being converted.
lowercase_ = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (R"""tfds\.core""", R"""datasets"""),
    (R"""tf\.io\.gfile\.GFile""", R"""open"""),
    (R"""tf\.([\w\d]+)""", R"""datasets.Value('\1')"""),
    (R"""tfds\.features\.Text\(\)""", R"""datasets.Value('string')"""),
    (R"""tfds\.features\.Text\(""", R"""datasets.Value('string'),"""),
    (R"""features\s*=\s*tfds.features.FeaturesDict\(""", R"""features=datasets.Features("""),
    (R"""tfds\.features\.FeaturesDict\(""", R"""dict("""),
    (R"""The TensorFlow Datasets Authors""", R"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
    (R"""tfds\.""", R"""datasets."""),
    (R"""dl_manager\.manual_dir""", R"""self.config.data_dir"""),
    (R"""self\.builder_config""", R"""self.config"""),
]
def a ( A__ : List[str] ) -> Any:
    """Factory used by argparse ``set_defaults(func=...)``: build the convert
    command from the parsed CLI namespace.

    NOTE(review): the body reads ``args`` (parameter is scrambled to ``A__``)
    and returns ``ConvertCommand`` although the class below is named
    ``__lowerCAmelCase`` — scrambled renames; confirm against upstream.
    """
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class __lowerCAmelCase ( lowercase_ ):
    """CLI command that converts a TensorFlow Datasets script into a
    HuggingFace Datasets script by regex-rewriting its source.

    NOTE(review): identifiers in this block look machine-scrambled — every
    local is bound to ``_lowercase`` while later lines read the real names
    (``parser``, ``train_parser``, ``out_line``, ``match``, ...), and
    ``__init__`` declares duplicate parameter names (a SyntaxError). Confirm
    against the upstream ``datasets/commands/convert.py``.
    """

    @staticmethod
    def A__ ( lowerCAmelCase ) -> None:
        """Register the ``convert`` sub-command and its CLI arguments."""
        _lowercase =parser.add_parser(
            'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
        train_parser.add_argument(
            '--tfds_path' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
        train_parser.add_argument(
            '--datasets_directory' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to the HuggingFace Datasets folder.' )
        train_parser.set_defaults(func=lowerCAmelCase )

    def __init__( self , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ) -> None:
        """Store the logger, the source tfds path and the destination dir."""
        _lowercase =get_logger('datasets-cli/converting' )
        _lowercase =tfds_path
        _lowercase =datasets_directory

    def A__ ( self ) -> None:
        """Run the conversion over a single tfds file or a folder of files,
        copy utility modules next to the builders that import them, and warn
        about files that need manual follow-up."""
        # Resolve the source (file or directory) and the destination path.
        if os.path.isdir(self._tfds_path ):
            _lowercase =os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            _lowercase =os.path.dirname(self._tfds_path )
        else:
            raise ValueError('--tfds_path is neither a directory nor a file. Please check path.' )
        _lowercase =os.path.abspath(self._datasets_directory )
        self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
        _lowercase =[]
        _lowercase =[]
        _lowercase ={}
        if os.path.isdir(self._tfds_path ):
            _lowercase =os.listdir(lowerCAmelCase )
        else:
            _lowercase =[os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(F'''Looking at file {f_name}''' )
            _lowercase =os.path.join(lowerCAmelCase , lowerCAmelCase )
            _lowercase =os.path.join(lowerCAmelCase , lowerCAmelCase )
            # Only convert real dataset scripts: skip package/test/non-Python files.
            if not os.path.isfile(lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('Skipping file' )
                continue
            with open(lowerCAmelCase , encoding='utf-8' ) as f:
                _lowercase =f.readlines()
            _lowercase =[]
            _lowercase =False
            _lowercase =False
            _lowercase =[]
            for line in lines:
                _lowercase =line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    _lowercase ='import datasets\n'
                elif "import tensorflow" in out_line:
                    # order is important here
                    _lowercase =''
                    continue
                elif "from absl import logging" in out_line:
                    _lowercase ='from datasets import logging\n'
                elif "getLogger" in out_line:
                    _lowercase =out_line.replace('getLogger' , 'get_logger' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    # Keep the line but wrap it in conflict-style markers so a
                    # human reviews it later.
                    _lowercase =True
                    _lowercase =list(filter(lambda lowerCAmelCase : e in out_line , lowerCAmelCase ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase ) + '\n' )
                    out_lines.append(lowerCAmelCase )
                    out_lines.append(lowerCAmelCase )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        _lowercase =re.sub(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    _lowercase =re.match(R'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , lowerCAmelCase )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
                    _lowercase ='from . import ' + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(F'''Error converting {out_line.strip()}''' )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    _lowercase =True
                out_lines.append(lowerCAmelCase )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                _lowercase =f_name.replace('.py' , '' )
                _lowercase =os.path.join(lowerCAmelCase , lowerCAmelCase )
                _lowercase =os.path.join(lowerCAmelCase , lowerCAmelCase )
                os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
                self._logger.info(F'''Adding directory {output_dir}''' )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(lowerCAmelCase )
            if needs_manual_update:
                with_manual_update.append(lowerCAmelCase )
            with open(lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
                f.writelines(lowerCAmelCase )
            self._logger.info(F'''Converted in {output_file}''' )
        # Copy each shared utility module next to the builder that imports it.
        for utils_file in utils_files:
            try:
                _lowercase =os.path.basename(lowerCAmelCase )
                _lowercase =imports_to_builder_map[f_name.replace('.py' , '' )]
                self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
                shutil.copy(lowerCAmelCase , lowerCAmelCase )
            except KeyError:
                self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
| 205 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
# JAX/Flax imports are only attempted when the flax extra is installed; the
# test class below is decorated with @require_flax.
if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard
@slow
@require_flax
class lowercase ( unittest.TestCase ):
    """Slow integration test for ``FlaxStableDiffusionInpaintPipeline``.

    NOTE(review): both methods are named ``a`` (the second shadows the
    first), and locals are bound to ``snake_case_`` while later lines read
    ``pipeline``, ``params``, ``prompt``, ``output_slice`` ... — scrambled
    renames; confirm against the upstream diffusers test.
    """

    def a ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def a ( self ):
        """End-to-end sharded inpainting run; checks a 3x3 slice of the
        generated 512x512 image against reference values."""
        snake_case_ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        snake_case_ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        snake_case_ = 'xvjiarui/stable-diffusion-2-inpainting'
        snake_case_ , snake_case_ = FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case , safety_checker=snake_case )
        snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
        snake_case_ = jax.random.PRNGKey(0 )
        snake_case_ = 50
        # replicate the prompt/images once per available device
        snake_case_ = jax.device_count()
        snake_case_ = num_samples * [prompt]
        snake_case_ = num_samples * [init_image]
        snake_case_ = num_samples * [mask_image]
        snake_case_ , snake_case_ , snake_case_ = pipeline.prepare_inputs(snake_case , snake_case , snake_case )
        # shard inputs and rng
        snake_case_ = replicate(snake_case )
        snake_case_ = jax.random.split(snake_case , jax.device_count() )
        snake_case_ = shard(snake_case )
        snake_case_ = shard(snake_case )
        snake_case_ = shard(snake_case )
        snake_case_ = pipeline(
            snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , jit=snake_case )
        snake_case_ = output.images.reshape(snake_case , 512 , 512 , 3 )
        snake_case_ = images[0, 253:256, 253:256, -1]
        snake_case_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        snake_case_ = jnp.array(
            [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 285 | 0 |
import math


def is_prime(number: int) -> bool:
    """Return True if *number* is prime, using 6k±1 trial division.

    Fix: both functions in this script were named ``_a``, so the second
    shadowed the first and then called the undefined name ``is_prime``; the
    first function is renamed to match its call site. The parameter was also
    mis-annotated as ``str``.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def _a(ratio: float = 0.1) -> int:
    """Project Euler 58: return the side length of the square spiral at
    which the ratio of primes along both diagonals first falls below
    *ratio*.
    """
    j = 3       # current side length of the spiral
    primes = 3  # primes seen on the diagonals so far (3, 5, 7)
    while primes / (2 * j - 1) >= ratio:
        # Three new corners of the next ring (the fourth is a perfect square).
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 340 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    'files' , [
        ['full:README.md', 'dataset_infos.json'],
        ['empty:README.md', 'dataset_infos.json'],
        ['dataset_infos.json'],
        ['full:README.md'],
    ] , )
def __lowerCamelCase ( files , tmp_path_factory ):
    """`DatasetInfosDict.from_directory` reads `dataset_size` from either the
    README.md YAML header or the legacy dataset_infos.json.

    Fix: both parameters were named `UpperCamelCase__` (duplicate argument
    names are a SyntaxError); the body already reads `files` and
    `tmp_path_factory`, so those names are restored — `files` also matches
    the parametrize key and `tmp_path_factory` the pytest fixture.
    """
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir' )
    if "full:README.md" in files:
        with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
            f.write('---\ndataset_info:\n dataset_size: 42\n---' )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
            f.write('' )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
            f.write('{"default": {"dataset_size": 42}}' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    'dataset_info' , [
        DatasetInfo(),
        DatasetInfo(
            description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
    ] , )
def __lowerCamelCase ( dataset_info , tmp_path ):
    """Round-trip a DatasetInfo through write_to_directory/from_directory.

    Fix: both parameters were named `UpperCamelCase__` (a SyntaxError) and
    the body read the undefined names `dataset_info`/`reloaded`; parameter
    names are restored from the body's own reads, the parametrize key and
    the pytest `tmp_path` fixture.
    """
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , 'dataset_info.json' ) )
def __lowerCamelCase ( ):
    """`DatasetInfo._to_yaml_dict` exports exactly the YAML-included fields,
    each with a YAML-serializable type, and survives a safe_dump/safe_load
    round trip.

    Fix: every local was discarded into the scrambled name `snake_case_`
    while later lines read `dataset_info` / `dataset_info_yaml_dict` /
    `reloaded` and `sorted(...)` received an undefined name; the bindings are
    restored from the body's own reads.
    """
    dataset_info = DatasetInfo(
        description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def __lowerCamelCase ( ):
    """An empty DatasetInfo serializes to an empty YAML dict.

    Fix: both locals were discarded into the scrambled name `snake_case_`
    while the assertion read the undefined `dataset_info_yaml_dict`; the
    bindings are restored from the body's own reads.
    """
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    'dataset_infos_dict' , [
        DatasetInfosDict(),
        DatasetInfosDict({'default': DatasetInfo()} ),
        DatasetInfosDict({'my_config_name': DatasetInfo()} ),
        DatasetInfosDict(
            {
                'default': DatasetInfo(
                    description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
            } ),
        DatasetInfosDict(
            {
                'v1': DatasetInfo(dataset_size=42 ),
                'v2': DatasetInfo(dataset_size=1337 ),
            } ),
    ] , )
def __lowerCamelCase ( dataset_infos_dict , tmp_path ):
    """Round-trip a DatasetInfosDict through write_to_directory/from_directory.

    Fix: both parameters were named `UpperCamelCase__` (a SyntaxError) and
    the loop-body assignments were discarded into scrambled names while
    later lines read `dataset_infos_dict`/`reloaded`; the bindings are
    restored from the body's own reads. NOTE(review): the two reconstructed
    assignment targets inside the loop follow the surviving comments —
    confirm against the upstream datasets test.
    """
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path , 'README.md' ) )
| 285 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module logger.
# NOTE(review): the three values below are read later as `logger`,
# `MODEL_CONFIG_CLASSES` and `MODEL_TYPES`, but all are bound to the
# scrambled name `_snake_case` — confirm against the upstream run_mim.py.
_snake_case : Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
_snake_case : Any = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
_snake_case : str = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class a :
    """Data arguments for the masked-image-modeling example (which dataset
    or folders to train/validate on, masking geometry, debug truncation).

    NOTE(review): field defaults reference the undefined scrambled name
    `lowercase_` (upstream: `default=None`), the class name `a` is reused by
    later classes in this file, and the method's locals are discarded into
    `__snake_case` while it reads `data_files` — confirm against the
    upstream `DataTrainingArguments`.
    """

    __UpperCAmelCase : Optional[str] = field(
        default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
    __UpperCAmelCase : Optional[str] = field(
        default=lowercase_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    __UpperCAmelCase : Optional[str] = field(
        default=lowercase_ , metadata={"help": "The column name of the images in the files. If not set, will try to use \'image\' or \'img\'."} , )
    __UpperCAmelCase : Optional[str] = field(default=lowercase_ , metadata={"help": "A folder containing the training data."} )
    __UpperCAmelCase : Optional[str] = field(default=lowercase_ , metadata={"help": "A folder containing the validation data."} )
    __UpperCAmelCase : Optional[float] = field(
        default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
    __UpperCAmelCase : int = field(default=32 , metadata={"help": "The size of the square patches to use for masking."} )
    __UpperCAmelCase : float = field(
        default=0.6 , metadata={"help": "Percentage of patches to mask."} , )
    __UpperCAmelCase : Optional[int] = field(
        default=lowercase_ , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    __UpperCAmelCase : Optional[int] = field(
        default=lowercase_ , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )

    def __snake_case ( self : Optional[Any] ) -> List[str]:
        # __post_init__-style helper: collect the train/validation folders
        # into a data_files mapping (None when neither folder is set).
        __snake_case : str = {}
        if self.train_dir is not None:
            __snake_case : Any = self.train_dir
        if self.validation_dir is not None:
            __snake_case : List[str] = self.validation_dir
        __snake_case : str = data_files if data_files else None
@dataclass
class a :
    """Model arguments for the masked-image-modeling example (which
    checkpoint/config/image-processor to start from, image/patch sizes,
    encoder stride).

    NOTE(review): field defaults reference the undefined scrambled name
    `lowercase_` (upstream: `default=None`), and the class name `a` shadows
    the dataclass above — confirm against the upstream `ModelArguments`.
    """

    __UpperCAmelCase : str = field(
        default=lowercase_ , metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don\'t set if you want to train a model from scratch."
            )
        } , )
    __UpperCAmelCase : Optional[str] = field(
        default=lowercase_ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(lowercase_ )} , )
    __UpperCAmelCase : Optional[str] = field(
        default=lowercase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    __UpperCAmelCase : Optional[str] = field(
        default=lowercase_ , metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        } , )
    __UpperCAmelCase : Optional[str] = field(
        default=lowercase_ , metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"} , )
    __UpperCAmelCase : str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    __UpperCAmelCase : str = field(default=lowercase_ , metadata={"help": "Name or path of preprocessor config."} )
    __UpperCAmelCase : bool = field(
        default=lowercase_ , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    __UpperCAmelCase : Optional[int] = field(
        default=lowercase_ , metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        } , )
    __UpperCAmelCase : Optional[int] = field(
        default=lowercase_ , metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        } , )
    __UpperCAmelCase : Optional[int] = field(
        default=lowercase_ , metadata={"help": "Stride to use for the encoder."} , )
class a :
    """Random-mask generator for SimMIM-style masked image modeling.

    Chooses ``mask_ratio`` of the coarse ``mask_patch_size`` patches at
    random, then expands the selection to the finer ``model_patch_size``
    grid and returns it as a flat 0/1 tensor.

    Fixes: the original ``__init__`` declared four parameters all named
    ``lowerCamelCase`` (duplicate argument names are a SyntaxError) while the
    body read ``input_size``/``mask_patch_size``/... ; the attribute
    assignments were discarded into a local ``__snake_case`` so the
    ``self.*`` reads below would fail; and ``__call__`` used the undefined
    ``dtype=lowerCamelCase`` (restored to ``int``). The scrambled class name
    ``a`` is kept so later references in this file still resolve.
    """

    def __init__( self , input_size=192 , mask_patch_size=32 , model_patch_size=4 , mask_ratio=0.6 ):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size" )
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size" )
        # number of mask patches per side, and model patches per mask patch side
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio ) )

    def __call__( self ):
        """Return a flat tensor over the model patch grid with ``mask_count``
        randomly chosen mask patches set to 1."""
        mask_idx = np.random.permutation(self.token_count )[: self.mask_count]
        mask = np.zeros(self.token_count , dtype=int )
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size) )
        # expand each coarse mask patch to the finer model patch grid
        mask = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
        return torch.tensor(mask.flatten() )
def lowerCAmelCase_ ( examples ):
    """Collate a list of ``{"pixel_values", "mask"}`` examples into batched
    tensors for masked image modeling.

    Fix: the parameter was named ``__lowerCamelCase`` while the body iterated
    the undefined name ``examples``; the parameter is renamed to match.

    Returns a dict with the stacked ``pixel_values`` and the stacked masks
    under the ``bool_masked_pos`` key.
    """
    pixel_values = torch.stack([example["pixel_values"] for example in examples] )
    mask = torch.stack([example["mask"] for example in examples] )
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def lowerCAmelCase_ ( ):
    """Entry point for masked-image-modeling (SimMIM-style) pretraining.

    Parses arguments, configures logging, loads the dataset, builds the config,
    image processor and model, wires up the mask generator and image transforms,
    then trains/evaluates with the HF ``Trainer`` and writes a model card.

    NOTE(review): this def shadows the collate function above (same name
    `lowerCAmelCase_`); upstream these are `collate_fn` and `main`.
    NOTE(review): throughout the body, results are bound to the placeholder
    `__snake_case` while later lines read descriptive names (`parser`, `ds`,
    `config`, `trainer`, ...), and the undefined name `UpperCamelCase__` is
    passed as an argument in many calls — the original identifiers must be
    restored before this function can run.
    """
    # Parse CLI (or a single JSON file) into the three argument dataclasses.
    __snake_case : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        __snake_case , __snake_case , __snake_case : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        __snake_case , __snake_case , __snake_case : Optional[Any] = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim" , UpperCamelCase__ , UpperCamelCase__ )
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    __snake_case : List[str] = training_args.get_process_log_level()
    logger.setLevel(UpperCamelCase__ )
    transformers.utils.logging.set_verbosity(UpperCamelCase__ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        # NOTE(review): `fpaa` looks like a mangled `fp16` attribute — confirm.
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
    logger.info(F'Training/evaluation parameters {training_args}' )
    # Detecting last checkpoint.
    __snake_case : List[str] = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        __snake_case : Optional[Any] = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'Output directory ({training_args.output_dir}) already exists and is not empty. '
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
    # Initialize our dataset.
    __snake_case : int = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    __snake_case : List[Any] = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , UpperCamelCase__ ) and data_args.train_val_split > 0.0:
        __snake_case : List[Any] = ds["train"].train_test_split(data_args.train_val_split )
        __snake_case : Optional[Any] = split["train"]
        __snake_case : Optional[int] = split["test"]
    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    __snake_case : Optional[Any] = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        __snake_case : Optional[int] = AutoConfig.from_pretrained(model_args.config_name_or_path , **UpperCamelCase__ )
    elif model_args.model_name_or_path:
        __snake_case : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCamelCase__ )
    else:
        __snake_case : Union[str, Any] = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch." )
        if model_args.config_overrides is not None:
            logger.info(F'Overriding config: {model_args.config_overrides}' )
            config.update_from_string(model_args.config_overrides )
            logger.info(F'New config: {config}' )
    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(UpperCamelCase__ , "decoder_type" ):
        __snake_case : Tuple = "simmim"
    # adapt config
    __snake_case : Dict = model_args.image_size if model_args.image_size is not None else config.image_size
    __snake_case : Dict = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    __snake_case : int = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        } )
    # create image processor
    if model_args.image_processor_name:
        __snake_case : Any = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCamelCase__ )
    elif model_args.model_name_or_path:
        __snake_case : Any = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCamelCase__ )
    else:
        __snake_case : Optional[int] = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        __snake_case : Union[str, Any] = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
    # create model
    if model_args.model_name_or_path:
        __snake_case : Optional[Any] = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("Training new model from scratch" )
        __snake_case : Union[str, Any] = AutoModelForMaskedImageModeling.from_config(UpperCamelCase__ )
    if training_args.do_train:
        __snake_case : Optional[Any] = ds["train"].column_names
    else:
        __snake_case : Tuple = ds["validation"].column_names
    if data_args.image_column_name is not None:
        __snake_case : str = data_args.image_column_name
    elif "image" in column_names:
        __snake_case : Dict = "image"
    elif "img" in column_names:
        __snake_case : List[str] = "img"
    else:
        __snake_case : Optional[int] = column_names[0]
    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    __snake_case : List[Any] = Compose(
        [
            # NOTE(review): the lambda parameter is `__lowerCamelCase` but the
            # body reads `img` — presumably the parameter should be `img`; confirm.
            Lambda(lambda __lowerCamelCase : img.convert("RGB" ) if img.mode != "RGB" else img ),
            RandomResizedCrop(model_args.image_size , scale=(0.6_7, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )
    # create mask generator
    __snake_case : int = MaskGenerator(
        input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
    def preprocess_images(__lowerCamelCase ):
        # NOTE(review): `transforms(UpperCamelCase__)` presumably should be
        # `transforms(image)` (apply the pipeline to each image) — confirm.
        __snake_case : str = [transforms(UpperCamelCase__ ) for image in examples[image_column_name]]
        __snake_case : str = [mask_generator() for i in range(len(examples[image_column_name] ) )]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset" )
        if data_args.max_train_samples is not None:
            __snake_case : Union[str, Any] = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(UpperCamelCase__ )
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset" )
        if data_args.max_eval_samples is not None:
            __snake_case : int = (
                ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(UpperCamelCase__ )
    # Initialize our trainer
    __snake_case : Tuple = Trainer(
        model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , )
    # Training
    if training_args.do_train:
        __snake_case : Dict = None
        if training_args.resume_from_checkpoint is not None:
            __snake_case : List[str] = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            __snake_case : Any = last_checkpoint
        __snake_case : Optional[Any] = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
        trainer.save_model()
        trainer.log_metrics("train" , train_result.metrics )
        trainer.save_metrics("train" , train_result.metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        __snake_case : Tuple = trainer.evaluate()
        trainer.log_metrics("eval" , UpperCamelCase__ )
        trainer.save_metrics("eval" , UpperCamelCase__ )
    # Write model card and (optionally) push to hub
    __snake_case : Optional[Any] = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**UpperCamelCase__ )
    else:
        trainer.create_model_card(**UpperCamelCase__ )


if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this file — the entry point above
    # is named `lowerCAmelCase_`; confirm the intended name.
    main()
| 123 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for `BloomTokenizerFast`.

    Bloom has no slow tokenizer, so only the fast (Rust-backed) path is tested.
    NOTE: the obfuscated original named every method `a` (each def shadowed
    the previous one) and bound every class attribute to the same name; the
    canonical attribute/method names expected by `TokenizerTesterMixin` and
    unittest discovery are restored below.
    """

    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        """Hard-coded token ids must round-trip through encode/decode."""
        tokenizer = self.get_rust_tokenizer()
        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)
        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests: padding-free calls must not raise.
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )

    def test_encodings_from_xnli_dataset(self):
        """Encoding then decoding real multilingual data must be lossless."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)
        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())
        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positoonal embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 285 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : Union[str, Any] = {
"""configuration_trajectory_transformer""": [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TrajectoryTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrajectoryTransformerModel""",
"""TrajectoryTransformerPreTrainedModel""",
"""load_tf_weights_in_trajectory_transformer""",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
A__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 144 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download ~`num_class_images` regularization images matching `class_prompt`.

    Queries the LAION-400M kNN service (growing the candidate pool until at
    least 1.5x the requested count is returned), then downloads images into
    `class_data_dir`/images and records captions, urls and local paths.

    NOTE: the obfuscated original declared all three parameters with the same
    name (a SyntaxError); the names above are the ones the body reads, and the
    function is named `retrieve` as the `__main__` guard calls it.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    # Already have enough images downloaded — nothing to do.
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            # Not enough candidates: grow the requested pool and retry.
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Validate that the payload is a decodable image before saving.
                    Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download: skip any broken url/image.
                continue
    return
def parse_args():
    """Parse CLI arguments for the regularization-image retrieval script.

    NOTE: the obfuscated original passed the undefined name `UpperCamelCase__`
    for `add_help`, `required` and `type` (a NameError); the conventional
    values are restored. Named `parse_args` as the `__main__` guard calls it.
    """
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
_UpperCAmelCase : Optional[int] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 285 | 0 |
def lowerCAmelCase__(string: str, separator: str = " ") -> list:
    """Split `string` into the substrings separated by `separator`.

    Unlike ``str.split``, consecutive separators yield empty strings and a
    trailing separator yields no trailing empty string.

    NOTE: the obfuscated original declared both parameters with the same name
    (a SyntaxError); the names above are the ones the body reads.
    """
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            # Final character: flush the trailing word.
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 68 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Model max input sizes, keyed by checkpoint name.
# NOTE: the obfuscated original bound every constant below to `_UpperCAmelCase`
# (each assignment shadowing the previous one) while the dict literal and the
# tokenizer class read `PAD`, `CLS`, `SEP`, `BOS`, `MASK`, `RESERVED`,
# `UNICODE_VOCAB_SIZE` and `SPECIAL_CODEPOINTS`; those names are restored.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 111_4112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class lowercase(PreTrainedTokenizer):
    """Construct a CANINE tokenizer: a character splitter where each token is a
    single Unicode codepoint and ids are the codepoint values themselves.

    NOTE: the obfuscated original declared every `__init__` default as
    `chr(snake_case)` with duplicated parameter names (a SyntaxError), stored
    state in locals instead of on `self`, and named every method `a` (each def
    shadowing the previous one); the canonical `PreTrainedTokenizer` hook
    names and signatures are restored below.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string (i.e. perform character splitting)."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (a single character) to its id (its codepoint)."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """Convert a codepoint id back to its character (or special-token name)."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` sequences."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for the first sequence (with specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory, filename_prefix=None):
        # CANINE has no vocabulary file to save.
        return ()
| 285 | 0 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

# NOTE: the obfuscated original bound all four constants below to `snake_case`
# (each assignment shadowing the previous one) while the tokenizer class reads
# `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`; those names are restored.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

# Model max input sizes, keyed by checkpoint name.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}
class _snake_case(PreTrainedTokenizerFast):
    """Construct a "fast" LED tokenizer (backed by HuggingFace's tokenizers
    library), derived from the GPT-2/BART byte-level BPE tokenizer.

    NOTE: the obfuscated original declared duplicate `_a` parameters (a
    SyntaxError), bound all class attributes to `UpperCamelCase__` (shadowing)
    and named every method `SCREAMING_SNAKE_CASE`; the canonical
    `PreTrainedTokenizerFast` attribute and hook names are restored below.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Sync the backend pre-tokenizer's add_prefix_space with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word: include the preceding space.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        # LED (like BART) does not use token type ids; return all zeros.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
| 281 |
def generate_large_matrix() -> list:
    """Return a 1000x2000 grid whose rows and columns decrease strictly.

    Row ``i`` is ``range(1000 - i, -1000 - i, -1)``, so row 0 starts at 1000.
    NOTE: the obfuscated def was named `__lowerCamelCase` although the module
    calls `generate_large_matrix()`; the call-site name is restored.
    """
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
# Shared fixtures: the benchmark setup string imports `grid` from __main__,
# and the tuple below references it — the obfuscated code bound both to
# `_UpperCAmelCase`, leaving `grid` undefined; the original names are restored.
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list) -> None:
    """Assert that every row and every column of `grid` decreases monotonically.

    NOTE: the obfuscated original sorted the whole grid (`sorted(grid,
    reverse=grid)`) instead of each row/column; the per-row/per-column
    comparison is restored.
    """
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list) -> int:
    """Return the index of the first negative value in a decreasing `array`
    via binary search; returns ``len(array)`` when nothing is negative.

    NOTE: the obfuscated original collapsed `left`/`right`/`mid`/`num` into a
    single reused local; the distinct variables are restored.
    """
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list) -> int:
    """Count negatives in a sorted grid in O(m log n), narrowing the search
    bound row by row (rows/columns are decreasing, so bounds only shrink)."""
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list) -> int:
    """Count negatives by flattening the grid — O(m*n) baseline.

    NOTE: the obfuscated def took `UpperCamelCase__` while the body read
    `grid`; the parameter name and the benchmark-referenced function name are
    restored.
    """
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list) -> int:
    """Count negatives per row, breaking at the first negative (rows are
    decreasing, so everything after it is negative too).

    NOTE: the obfuscated original enumerated and measured `UpperCamelCase__`
    (the whole grid) instead of `row`; the per-row logic is restored.
    """
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    """Time the three counting strategies against the large module-level grid.

    NOTE: the obfuscated def was named `__lowerCamelCase` although the
    `__main__` guard calls `benchmark()`; the call-site name is restored.
    """
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 285 | 0 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
UpperCAmelCase = HUGGINGFACE_HUB_CACHE
UpperCAmelCase = """config.json"""
UpperCAmelCase = """diffusion_pytorch_model.bin"""
UpperCAmelCase = """diffusion_flax_model.msgpack"""
UpperCAmelCase = """model.onnx"""
UpperCAmelCase = """diffusion_pytorch_model.safetensors"""
UpperCAmelCase = """weights.pb"""
UpperCAmelCase = """https://huggingface.co"""
UpperCAmelCase = default_cache_path
UpperCAmelCase = """diffusers_modules"""
UpperCAmelCase = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
UpperCAmelCase = ["""fp16""", """non-ema"""]
UpperCAmelCase = """.self_attn"""
| 256 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
_UpperCAmelCase : Dict = logging.get_logger(__name__)
class lowercase :
    '''Utility holding one conversation: past user inputs, generated
    responses, and the not-yet-processed user input.

    Bug fixes relative to the shipped block: ``uuid.uuida`` does not exist
    (``uuid.uuid4`` is the correct constructor), the four ``__init__``
    parameters all shared one placeholder name (a SyntaxError), the state
    was bound to throwaway locals instead of instance attributes, and every
    method was named ``a`` so later definitions shadowed earlier ones while
    callers in this file use ``add_user_input`` / ``mark_processed`` /
    ``append_response`` / ``iter_texts``.
    '''

    def __init__( self , text=None , conversation_id=None , past_user_inputs=None , generated_responses=None ):
        # A fresh random id identifies the conversation across pipeline calls.
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        # Pending user text; moved into the history by ``mark_processed``.
        self.new_user_input = text

    def __eq__( self , other ):
        # Equal when sharing an id, or when all visible state matches.
        if not isinstance(other , lowercase ):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input( self , text , overwrite=False ):
        '''Queue ``text`` as the next user input; warn (and optionally
        overwrite) when an unprocessed input is already pending.'''
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
                    F'''with: "{text}".''' )
                self.new_user_input = text
            else:
                logger.warning(
                    F'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
                    F'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
        else:
            self.new_user_input = text

    def mark_processed( self ):
        '''Move the pending user input into the processed history.'''
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
        self.new_user_input = None

    def append_response( self , response ):
        '''Record a model-generated response.'''
        self.generated_responses.append(response )

    def iter_texts( self ):
        '''Yield ``(is_user, text)`` pairs in chronological order.'''
        for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__( self ):
        output = F'''Conversation id: {self.uuid} \n'''
        for is_user, text in self.iter_texts():
            name = 'user' if is_user else 'bot'
            output += F'''{name} >> {text} \n'''
        return output
@add_end_docstrings(
    lowercase_ , R'''
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    ''' , )
class lowercase ( lowercase_ ):
    # Multi-turn conversational pipeline (preprocess -> generate -> postprocess).
    # NOTE(review): this block is machine-obfuscated and not importable as-is:
    # several method signatures repeat the placeholder parameter name
    # ``snake_case`` (a SyntaxError), the hooks are all named ``a`` instead of
    # the pipeline hook names (presumably `_sanitize_parameters`, `preprocess`,
    # `_forward`, `postprocess`, `_legacy_parse_and_tokenize` -- confirm against
    # upstream), and results are bound to the throwaway local ``snake_case_``
    # while later lines read the original variable names (`forward_params`,
    # `outputs`, `input_ids`, ...). Statement order is preserved; restore the
    # names before use.
    def __init__( self , *snake_case , **snake_case ):
        super().__init__(*snake_case , **snake_case )
        # Fall back to the EOS token when no pad token is configured
        # (the original presumably assigned to ``self.tokenizer.pad_token``).
        if self.tokenizer.pad_token_id is None:
            snake_case_ = self.tokenizer.eos_token
    # Sanitize hook: split kwargs into preprocess/forward/postprocess dicts.
    def a ( self , snake_case=None , snake_case=None , snake_case=None , **snake_case ):
        snake_case_ = {}
        snake_case_ = {}
        snake_case_ = {}
        if min_length_for_response is not None:
            snake_case_ = min_length_for_response
        if minimum_tokens is not None:
            snake_case_ = minimum_tokens
        if "max_length" in generate_kwargs:
            snake_case_ = generate_kwargs['max_length']
        # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            snake_case_ = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(snake_case )
        return preprocess_params, forward_params, postprocess_params
    def __call__( self , snake_case , snake_case=0 , **snake_case ):
        # Run the pipeline; unwrap when a single Conversation was passed.
        snake_case_ = super().__call__(snake_case , num_workers=snake_case , **snake_case )
        if isinstance(snake_case , snake_case ) and len(snake_case ) == 1:
            return outputs[0]
        return outputs
    # Preprocess hook: validate the Conversation and encode it to input ids.
    def a ( self , snake_case , snake_case=32 ):
        if not isinstance(snake_case , snake_case ):
            raise ValueError('ConversationalPipeline, expects Conversation as inputs' )
        if conversation.new_user_input is None:
            raise ValueError(
                F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
                'Add user inputs with the conversation\'s `add_user_input` method' )
        if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
            snake_case_ = self.tokenizer._build_conversation_input_ids(snake_case )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            snake_case_ = self._legacy_parse_and_tokenize(snake_case )
        # Wrap the ids in the framework-appropriate tensor type.
        if self.framework == "pt":
            snake_case_ = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            snake_case_ = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}
    # Forward hook: trim over-long inputs, then generate a reply.
    def a ( self , snake_case , snake_case=10 , **snake_case ):
        snake_case_ = generate_kwargs.get('max_length' , self.model.config.max_length )
        snake_case_ = model_inputs['input_ids'].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
            snake_case_ = max_length - minimum_tokens
            snake_case_ = model_inputs['input_ids'][:, -trim:]
            if "attention_mask" in model_inputs:
                snake_case_ = model_inputs['attention_mask'][:, -trim:]
        snake_case_ = model_inputs.pop('conversation' )
        snake_case_ = max_length
        snake_case_ = self.model.generate(**snake_case , **snake_case )
        # Encoder-decoder models emit a leading decoder-start token to skip;
        # decoder-only models echo the prompt, so skip the whole input length.
        if self.model.config.is_encoder_decoder:
            snake_case_ = 1
        else:
            snake_case_ = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    # Postprocess hook: decode the reply and append it to the Conversation.
    def a ( self , snake_case , snake_case=True ):
        snake_case_ = model_outputs['output_ids']
        snake_case_ = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case , )
        snake_case_ = model_outputs['conversation']
        conversation.mark_processed()
        conversation.append_response(snake_case )
        return conversation
    # Legacy encoding: concatenate each turn, separated by the EOS token.
    def a ( self , snake_case ):
        snake_case_ = self.tokenizer.eos_token_id
        snake_case_ = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(snake_case , add_special_tokens=snake_case ) )
        # Keep only the most recent tokens when the history exceeds the limit.
        if len(snake_case ) > self.tokenizer.model_max_length:
            snake_case_ = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 285 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class lowercase__ ( lowercase_ ):
    '''Configuration class for the Salesforce CTRL model.

    Bug fixes relative to the shipped block: the three class attributes were
    all bound to one name (``A_``, so only the last survived) and every
    ``__init__`` parameter shared a single placeholder name (a SyntaxError)
    while the body read the original names. The conventional
    ``PretrainedConfig`` attribute and parameter names are restored.
    '''

    # ``model_type`` is required by the PretrainedConfig registry machinery.
    model_type = 'ctrl'
    keys_to_ignore_at_inference = ['past_key_values']
    # Map the generic config attribute names onto CTRL's GPT-style names.
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self , vocab_size=24_6534 , n_positions=256 , n_embd=1280 , dff=8192 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1e-6 , initializer_range=0.02 , use_cache=True , **kwargs , ):
        # Core transformer dimensions.
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        # Inner feed-forward dimension.
        self.dff = dff
        # Dropout probabilities for residual streams and embeddings.
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs )
| 200 |
from PIL import Image
def __lowerCamelCase ( img , level ):
    '''Return a copy of ``img`` with its contrast adjusted.

    Bug fix: the original signature declared the same placeholder name for
    both parameters (a SyntaxError) and never bound ``img``/``level`` which
    the body reads.

    Args:
        img: a PIL image (anything exposing ``point``).
        level: contrast level; 0 leaves the image unchanged, positive values
            increase contrast, negative values decrease it.

    Returns:
        The transformed image produced by ``img.point``.
    '''
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c ) -> int:
        # Linear contrast curve pivoting around the mid-gray value 128.
        return int(128 + factor * (c - 128) )

    return img.point(contrast )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change contrast to 170
_UpperCAmelCase : Tuple = change_contrast(img, 170)
cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
| 285 | 0 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def __lowerCamelCase ( arrival_time , burst_time , no_of_processes ) -> list[int]:
    '''Compute per-process waiting times under non-preemptive shortest-job-first.

    Bug fixes: the original declared all three parameters with one
    placeholder name (a SyntaxError), iterated over an undefined name, and
    bound results to throwaway locals instead of the waiting/remaining lists.

    Args:
        arrival_time: arrival time of each process.
        burst_time: CPU burst length of each process.
        no_of_processes: number of processes (length of the lists above).

    Returns:
        list[int]: waiting time of each process, index-aligned with the input.
    '''
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to the full burst of every process.
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    completed = 0
    total_time = 0
    # While processes remain, gather every arrived, unfinished process and
    # run the one with the shortest remaining burst to completion.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes ):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i )
        if len(ready_process ) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            # Waiting time = finish time - arrival - own burst.
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            # Nothing has arrived yet: let the clock tick.
            total_time += 1
    return waiting_time
def __lowerCamelCase ( burst_time , no_of_processes , waiting_time ) -> list[int]:
    '''Compute turnaround times as burst time plus waiting time.

    Bug fix: the original declared all three parameters with one placeholder
    name (a SyntaxError) and the body read undefined names.

    Args:
        burst_time: CPU burst length of each process.
        no_of_processes: number of processes.
        waiting_time: waiting time of each process (see the function above).

    Returns:
        list[int]: turnaround time of each process.
    '''
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
# Demo: run the SJF scheduler on a fixed test case and print a table.
# NOTE(review): obfuscation damage -- every local below is bound to the same
# name ``a`` (only the last assignment survives), and the calls reference
# ``calculate_waitingtime`` / ``calculate_turnaroundtime`` while the
# functions above are both named ``__lowerCamelCase``; the prints also read
# ``burst_time``/``arrival_time``/``waiting_time``/``turn_around_time``
# which are never bound. Restore the original names before running.
if __name__ == "__main__":
    print("""[TEST CASE 01]""")
    a : List[Any] = 4
    a : Any = [2, 5, 3, 7]
    a : Any = [0, 0, 0, 0]
    a : Tuple = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    a : Tuple = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
    # Printing the Result
    print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
            F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
        )
    print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
    print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
| 265 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
# NOTE(review): the docstring constants below are all bound to the single
# name ``_UpperCAmelCase`` (each assignment overwrites the previous one),
# yet the decorators further down reference ``_CONFIG_FOR_DOC``,
# ``_CHECKPOINT_FOR_DOC``, ``_EXPECTED_OUTPUT_SHAPE``,
# ``_IMAGE_CLASS_CHECKPOINT`` and ``_IMAGE_CLASS_EXPECTED_OUTPUT`` -- those
# names are never defined. Restore the distinct names before use.
# General docstring
_UpperCAmelCase : Dict = """ResNetConfig"""
# Base docstring
_UpperCAmelCase : Optional[int] = """microsoft/resnet-50"""
_UpperCAmelCase : Optional[Any] = [1, 2048, 7, 7]
# Image classification docstring
_UpperCAmelCase : Tuple = """microsoft/resnet-50"""
_UpperCAmelCase : int = """tiger cat"""
_UpperCAmelCase : Optional[Any] = [
    """microsoft/resnet-50""",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class lowercase ( nn.Module ):
    '''Convolution -> BatchNorm -> activation building block of ResNet.

    Bug fixes relative to the shipped block: ``nn.Convad``/``nn.BatchNormad``
    are not torch APIs (``nn.Conv2d``/``nn.BatchNorm2d``), the ``__init__``
    parameters all shared one placeholder name (a SyntaxError), the layers
    were bound to throwaway locals instead of instance attributes, and the
    forward method was named ``a`` which breaks ``nn.Module.__call__``
    dispatch.
    '''

    def __init__( self , in_channels , out_channels , kernel_size=3 , stride=1 , activation="relu" ):
        super().__init__()
        # Same-padding convolution; bias is redundant before BatchNorm.
        self.convolution = nn.Conv2d(
            in_channels , out_channels , kernel_size=kernel_size , stride=stride , padding=kernel_size // 2 , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )
        # ``activation=None`` yields a pass-through (used by residual layers).
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward( self , input ):
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
class lowercase ( nn.Module ):
    '''ResNet stem: a 7x7 stride-2 convolution followed by 3x3 max pooling.

    Bug fixes relative to the shipped block: ``nn.MaxPoolad`` is not a torch
    API (``nn.MaxPool2d``), the layers were bound to throwaway locals
    instead of instance attributes, and the forward method was named ``a``.
    '''

    def __init__( self , config ):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
        self.pooler = nn.MaxPool2d(kernel_size=3 , stride=2 , padding=1 )
        # Remembered so forward() can validate its input.
        self.num_channels = config.num_channels

    def forward( self , pixel_values ):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
        embedding = self.embedder(pixel_values )
        embedding = self.pooler(embedding )
        return embedding
class lowercase ( nn.Module ):
    '''1x1 convolution + BatchNorm used to project the residual branch when
    the channel count or spatial resolution changes.

    Bug fixes relative to the shipped block: ``nn.Convad``/``nn.BatchNormad``
    are not torch APIs, the ``__init__`` parameters shared one placeholder
    name (a SyntaxError), the layers were bound to throwaway locals, and the
    forward method was named ``a``.
    '''

    def __init__( self , in_channels , out_channels , stride=2 ):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels , out_channels , kernel_size=1 , stride=stride , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )

    def forward( self , input ):
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        return hidden_state
class lowercase ( nn.Module ):
    '''Classic ResNet basic layer: two 3x3 convolutions plus a residual
    connection.

    Bug fixes relative to the shipped block: the ``__init__`` parameters
    shared one placeholder name (a SyntaxError), the sub-modules were bound
    to throwaway locals instead of ``self.shortcut``/``self.layer``/
    ``self.activation`` which forward() reads, and the forward method was
    named ``a``.
    '''

    def __init__( self , in_channels , out_channels , stride=1 , activation="relu" ):
        super().__init__()
        # Project the residual only when shape changes would break the add.
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        # Second conv has no activation; it is applied after the residual add.
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels , out_channels , stride=stride ) , ResNetConvLayer(out_channels , out_channels , activation=None ) , )
        self.activation = ACTaFN[activation]

    def forward( self , hidden_state ):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class lowercase ( nn.Module ):
    '''ResNet bottleneck layer: 1x1 reduce -> 3x3 -> 1x1 expand, with a
    residual connection. The first 1x1 shrinks channels by ``reduction``.

    Bug fixes relative to the shipped block: the ``__init__`` parameters
    shared one placeholder name (a SyntaxError), the reduced channel count
    and sub-modules were bound to throwaway locals instead of the attributes
    forward() reads, and the forward method was named ``a``.
    '''

    def __init__( self , in_channels , out_channels , stride=1 , activation="relu" , reduction=4 ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        # Final conv has no activation; it is applied after the residual add.
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels , reduces_channels , kernel_size=1 ) , ResNetConvLayer(reduces_channels , reduces_channels , stride=stride ) , ResNetConvLayer(reduces_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACTaFN[activation]

    def forward( self , hidden_state ):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class lowercase ( nn.Module ):
    '''A ResNet stage: several identical layers, the first of which may
    downsample via its stride.

    Bug fixes relative to the shipped block: the ``__init__`` parameters
    shared one placeholder name (a SyntaxError), the layer class and
    ``nn.Sequential`` were bound to throwaway locals instead of
    ``self.layers`` which forward() iterates, and the forward method was
    named ``a``.
    '''

    def __init__( self , config , in_channels , out_channels , stride=2 , depth=2 , ):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels , out_channels , stride=stride , activation=config.hidden_act ) , *[layer(out_channels , out_channels , activation=config.hidden_act ) for _ in range(depth - 1 )] , )

    def forward( self , input ):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state
class lowercase ( nn.Module ):
    '''The stack of ResNet stages, optionally collecting per-stage hidden
    states.

    Bug fixes relative to the shipped block: the stage list was bound to a
    throwaway local instead of ``self.stages``, the ``__init__`` loop zipped
    an undefined name, forward()'s parameters shared one placeholder name
    (a SyntaxError), and the forward method was named ``a``.
    '''

    def __init__( self , config ):
        super().__init__()
        self.stages = nn.ModuleList([] )
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:] ):
            self.stages.append(ResNetStage(config , in_channels , out_channels , depth=depth ) )

    def forward( self , hidden_state , output_hidden_states=False , return_dict=True ):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state , hidden_states=hidden_states , )
class lowercase ( lowercase_ ):
    '''Weight-initialization and pretrained-model plumbing shared by the
    concrete ResNet models below.

    Bug fixes relative to the shipped block: the four class attributes were
    all bound to one name (``__SCREAMING_SNAKE_CASE``), both methods were
    named ``a`` (shadowing), ``nn.Convad``/``nn.BatchNormad`` are not torch
    APIs, and the gradient-checkpointing isinstance check compared a value
    against itself. The conventional ``PreTrainedModel`` hook names are
    restored.
    '''

    config_class = ResNetConfig
    base_model_prefix = 'resnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights( self , module ):
        # Kaiming init for convolutions, constant init for norm layers.
        if isinstance(module , nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
        elif isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )

    def _set_gradient_checkpointing( self , module , value=False ):
        # NOTE(review): ``ResNetEncoder`` is the intended encoder class; in
        # this obfuscated file the encoder is defined under the name
        # ``lowercase`` -- confirm the binding before use.
        if isinstance(module , ResNetEncoder ):
            module.gradient_checkpointing = value
_UpperCAmelCase : Tuple = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase : Optional[int] = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    '''The bare ResNet model outputting raw features without any specific head on top.''' , lowercase_ , )
class lowercase ( lowercase_ ):
    # Bare ResNet: embeddings -> encoder -> adaptive average pooling.
    # NOTE(review): obfuscated block -- results are bound to the throwaway
    # local ``snake_case_`` instead of ``self.<attr>``/distinct locals that
    # later lines read (``config``, ``encoder_outputs``, ``last_hidden_state``,
    # ``pooled_output``), the forward method is named ``a`` instead of
    # ``forward``, its signature repeats the placeholder parameter name (a
    # SyntaxError), ``nn.AdaptiveAvgPoolad`` is not a torch API
    # (``nn.AdaptiveAvgPool2d``), and the decorators reference undefined
    # module constants. Statement order is preserved; restore the names
    # before use.
    def __init__( self , snake_case ):
        super().__init__(snake_case )
        snake_case_ = config
        snake_case_ = ResNetEmbeddings(snake_case )
        snake_case_ = ResNetEncoder(snake_case )
        snake_case_ = nn.AdaptiveAvgPoolad((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(snake_case )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def a ( self , snake_case , snake_case = None , snake_case = None ):
        # Resolve the output flags against the config defaults.
        snake_case_ = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict
        snake_case_ = self.embedder(snake_case )
        snake_case_ = self.encoder(
            snake_case , output_hidden_states=snake_case , return_dict=snake_case )
        snake_case_ = encoder_outputs[0]
        snake_case_ = self.pooler(snake_case )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=snake_case , pooler_output=snake_case , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    '''
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , lowercase_ , )
class lowercase ( lowercase_ ):
    # ResNet + linear classification head, with auto-detected problem type.
    # NOTE(review): obfuscated block -- results are bound to the throwaway
    # local ``snake_case_`` instead of the attributes/locals later lines read
    # (``self.num_labels``, ``self.resnet``, ``self.classifier``, ``outputs``,
    # ``logits``, ``loss``, ``output``), the forward method is named ``a``
    # with a signature that repeats the placeholder parameter name (a
    # SyntaxError), and the decorators reference undefined module constants.
    # Statement order is preserved; restore the names before use.
    def __init__( self , snake_case ):
        super().__init__(snake_case )
        snake_case_ = config.num_labels
        snake_case_ = ResNetModel(snake_case )
        # classification head
        snake_case_ = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(snake_case )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def a ( self , snake_case = None , snake_case = None , snake_case = None , snake_case = None , ):
        snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict
        snake_case_ = self.resnet(snake_case , output_hidden_states=snake_case , return_dict=snake_case )
        snake_case_ = outputs.pooler_output if return_dict else outputs[1]
        snake_case_ = self.classifier(snake_case )
        snake_case_ = None
        if labels is not None:
            # Infer the problem type from label count/dtype when unset.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    snake_case_ = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    snake_case_ = 'single_label_classification'
                else:
                    snake_case_ = 'multi_label_classification'
            # Pick the matching loss function.
            if self.config.problem_type == "regression":
                snake_case_ = MSELoss()
                if self.num_labels == 1:
                    snake_case_ = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    snake_case_ = loss_fct(snake_case , snake_case )
            elif self.config.problem_type == "single_label_classification":
                snake_case_ = CrossEntropyLoss()
                snake_case_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                snake_case_ = BCEWithLogitsLoss()
                snake_case_ = loss_fct(snake_case , snake_case )
        if not return_dict:
            snake_case_ = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=snake_case , logits=snake_case , hidden_states=outputs.hidden_states )
@add_start_docstrings(
    '''
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    ''' , lowercase_ , )
class lowercase ( lowercase_ , lowercase_ ):
    # Backbone wrapper exposing per-stage feature maps for detection heads.
    # NOTE(review): obfuscated block -- results are bound to the throwaway
    # local ``snake_case_`` instead of the attributes/locals later lines read
    # (``self.num_features``, ``self.embedder``, ``self.encoder``,
    # ``outputs``, ``hidden_states``, ``feature_maps``, ``output``), the
    # forward method is named ``a``, its signature repeats the placeholder
    # parameter name (a SyntaxError), and the decorators reference undefined
    # module constants. Statement order is preserved; restore the names
    # before use.
    def __init__( self , snake_case ):
        super().__init__(snake_case )
        super()._init_backbone(snake_case )
        snake_case_ = [config.embedding_size] + config.hidden_sizes
        snake_case_ = ResNetEmbeddings(snake_case )
        snake_case_ = ResNetEncoder(snake_case )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(snake_case )
    @replace_return_docstrings(output_type=snake_case , config_class=_CONFIG_FOR_DOC )
    def a ( self , snake_case , snake_case = None , snake_case = None ):
        snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict
        snake_case_ = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        snake_case_ = self.embedder(snake_case )
        snake_case_ = self.encoder(snake_case , output_hidden_states=snake_case , return_dict=snake_case )
        snake_case_ = outputs.hidden_states
        snake_case_ = ()
        # Keep only the hidden states of the requested output stages.
        for idx, stage in enumerate(self.stage_names ):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            snake_case_ = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=snake_case , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=snake_case , )
| 285 | 0 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class _lowercase ( unittest.TestCase):
    """simple docstring"""

    # Helper that builds small ViT configs/inputs for the Flax model tests.
    # NOTE(review): obfuscated block -- the ``__init__`` signature repeats the
    # placeholder parameter name ``__lowerCamelCase`` many times (a
    # SyntaxError) while the body reads the original names (``parent``,
    # ``batch_size``, ``image_size``, ...). Statement order is preserved;
    # restore the distinct parameter names before use.
    def __init__( self : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : int=13 , __lowerCamelCase : List[Any]=30 , __lowerCamelCase : int=2 , __lowerCamelCase : Dict=3 , __lowerCamelCase : int=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : List[str]=32 , __lowerCamelCase : Optional[Any]=5 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : Tuple=37 , __lowerCamelCase : str="gelu" , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : Optional[Any]=0.0_2 , ):
        '''simple docstring'''
        lowerCamelCase__ : Optional[Any] = parent
        lowerCamelCase__ : Union[str, Any] = batch_size
        lowerCamelCase__ : Dict = image_size
        lowerCamelCase__ : str = patch_size
        lowerCamelCase__ : str = num_channels
        lowerCamelCase__ : Tuple = is_training
        lowerCamelCase__ : List[str] = use_labels
        lowerCamelCase__ : Tuple = hidden_size
        lowerCamelCase__ : Any = num_hidden_layers
        lowerCamelCase__ : Optional[int] = num_attention_heads
        lowerCamelCase__ : List[Any] = intermediate_size
        lowerCamelCase__ : Any = hidden_act
        lowerCamelCase__ : Any = hidden_dropout_prob
        lowerCamelCase__ : Any = attention_probs_dropout_prob
        lowerCamelCase__ : List[Any] = type_sequence_label_size
        lowerCamelCase__ : Any = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        lowerCamelCase__ : Dict = (image_size // patch_size) ** 2
        lowerCamelCase__ : int = num_patches + 1
    # Build a (config, pixel_values) pair for the tests below.
    def lowerCAmelCase ( self : Tuple ):
        '''simple docstring'''
        lowerCamelCase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase__ : str = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , )
        return config, pixel_values
    # Check the base model's output shape.
    # NOTE(review): the methods below are all named ``lowerCAmelCase``, so
    # each definition shadows the previous one; restore the original
    # ``create_and_check_*`` / ``prepare_*`` names before use.
    def lowerCAmelCase ( self : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Any ):
        '''simple docstring'''
        lowerCamelCase__ : List[Any] = FlaxViTModel(config=__lowerCamelCase )
        lowerCamelCase__ : str = model(__lowerCamelCase )
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        lowerCamelCase__ : Tuple = (self.image_size, self.image_size)
        lowerCamelCase__ : str = (self.patch_size, self.patch_size)
        lowerCamelCase__ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
    # Check the classification head, including greyscale inputs.
    def lowerCAmelCase ( self : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Any ):
        '''simple docstring'''
        lowerCamelCase__ : Dict = self.type_sequence_label_size
        lowerCamelCase__ : Any = FlaxViTForImageClassification(config=__lowerCamelCase )
        lowerCamelCase__ : Any = model(__lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        lowerCamelCase__ : Union[str, Any] = 1
        lowerCamelCase__ : List[str] = FlaxViTForImageClassification(__lowerCamelCase )
        lowerCamelCase__ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCamelCase__ : int = model(__lowerCamelCase )
    # Build the (config, inputs_dict) pair used by the common test mixin.
    def lowerCAmelCase ( self : Dict ):
        '''simple docstring'''
        lowerCamelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase__
            ) , (
                lowerCamelCase__
            ) ,
        ) : List[str] = config_and_inputs
        lowerCamelCase__ : Optional[Any] = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class _lowercase ( lowercase_ , unittest.TestCase):
    """simple docstring"""

    # Flax ViT test suite driven by the shared model tester mixin.
    # NOTE(review): obfuscated block -- every test method is named
    # ``lowerCAmelCase`` (each definition shadows the previous one, so
    # unittest would discover none of them), and results are bound to
    # throwaway ``lowerCamelCase__`` locals while later lines read the
    # original names. Restore the ``test_*``/``setUp`` names before use.
    A__ = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def lowerCAmelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        lowerCamelCase__ : Optional[int] = FlaxViTModelTester(self )
        lowerCamelCase__ : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 )
    def lowerCAmelCase ( self : str ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def lowerCAmelCase ( self : Any ):
        '''simple docstring'''
        lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCamelCase )
    def lowerCAmelCase ( self : Optional[Any] ):
        '''simple docstring'''
        lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
    # Verify the model's call signature starts with ``pixel_values``.
    def lowerCAmelCase ( self : List[Any] ):
        '''simple docstring'''
        lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] = model_class(__lowerCamelCase )
            lowerCamelCase__ : Optional[int] = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase__ : Dict = [*signature.parameters.keys()]
            lowerCamelCase__ : Tuple = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , __lowerCamelCase )
    # Verify jitted and non-jitted forward passes agree in shape.
    def lowerCAmelCase ( self : List[Any] ):
        '''simple docstring'''
        lowerCamelCase__ , lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowerCamelCase__ : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase )
                lowerCamelCase__ : Optional[int] = model_class(__lowerCamelCase )
                @jax.jit
                def model_jitted(__lowerCamelCase : str , **__lowerCamelCase : List[str] ):
                    return model(pixel_values=__lowerCamelCase , **__lowerCamelCase )
                with self.subTest("JIT Enabled" ):
                    lowerCamelCase__ : int = model_jitted(**__lowerCamelCase ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        lowerCamelCase__ : Dict = model_jitted(**__lowerCamelCase ).to_tuple()
                self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) )
                for jitted_output, output in zip(__lowerCamelCase , __lowerCamelCase ):
                    self.assertEqual(jitted_output.shape , output.shape )
    # Smoke-test loading the pretrained checkpoint (network access required).
    @slow
    def lowerCAmelCase ( self : Optional[int] ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            lowerCamelCase__ : Any = model_class_name.from_pretrained("google/vit-base-patch16-224" )
            lowerCamelCase__ : Optional[Any] = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(__lowerCamelCase )
| 184 |
class lowercase :
    '''A named item with a value and a weight, plus the accessors used by the
    greedy knapsack routine below.

    Bug fixes relative to the shipped block: the ``__init__`` parameters all
    shared one placeholder name (a SyntaxError) and the state was bound to a
    throwaway local; the four accessor methods were all named ``a`` so each
    definition shadowed the previous one, while the greedy function in this
    module calls ``get_weight()``/``get_value()``.
    '''

    def __init__( self , name , value , weight ):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__( self ):
        return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''

    def get_value( self ):
        return self.value

    def get_name( self ):
        return self.name

    def get_weight( self ):
        return self.weight

    def value_weight( self ):
        # Value density: the default ranking key for the greedy selection.
        return self.value / self.weight
def __lowerCamelCase ( name , value , weight ):
    '''Build a list of item objects from parallel name/value/weight lists.

    Bug fixes: the original declared all three parameters with one
    placeholder name (a SyntaxError), never created the ``menu`` list it
    appended to, and instantiated an undefined ``Things`` class instead of
    the ``lowercase`` item class defined in this module.

    Args:
        name: item names.
        value: item values, index-aligned with ``name``.
        weight: item weights, index-aligned with ``name``.

    Returns:
        list: one item object per input triple.
    '''
    menu = []
    for i in range(len(name ) ):
        menu.append(lowercase(name[i] , value[i] , weight[i] ) )
    return menu
def __lowerCamelCase ( items , max_cost , key_func ):
    '''Greedy 0/1 selection: take items in decreasing ``key_func`` order while
    their cumulative weight stays within ``max_cost``.

    Bug fixes: the original declared all three parameters with one
    placeholder name (a SyntaxError) and bound the sorted copy and
    accumulators to throwaway locals while the loop read
    ``items_copy``/``max_cost``.

    Args:
        items: objects exposing ``get_value()`` and ``get_weight()``.
        max_cost: maximum total weight allowed.
        key_func: ranking function applied to each item (higher goes first).

    Returns:
        tuple: (selected items, their total value).
    '''
    items_copy = sorted(items , key=key_func , reverse=True )
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        # Take the item only if it still fits in the remaining budget.
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
# NOTE(review): this function body was reduced to a bare docstring by the
# obfuscation pass; upstream it presumably contained a demo/test -- confirm
# before relying on it doing anything.
def __lowerCamelCase ( ):
    '''simple docstring'''
# Script entry point: run the module doctests.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 285 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class lowerCAmelCase__ ( lowercase_ ):
    # Output container for the DeepFloyd IF pipelines.
    # NOTE(review): the three annotated fields below all share the name
    # ``lowerCAmelCase``, so only one dataclass field survives; upstream the
    # fields are presumably ``images`` / ``nsfw_detected`` /
    # ``watermark_detected`` -- confirm before relying on this class.
    lowerCAmelCase : Union[List[PIL.Image.Image], np.ndarray]
    lowerCAmelCase : Optional[List[bool]]
    lowerCAmelCase : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 234 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one dataset row and record its characters-per-token ratio.

    Fixes over the previous revision:
    - the function was named ``__lowerCamelCase`` while the ``ds.map(tokenize, ...)``
      call below passes ``tokenize``; the definition now matches its call site.
    - every intermediate result was assigned to one reused temporary, leaving
      ``output`` undefined at the return (NameError); results are now stored as
      keys of the ``output`` dict.

    Relies on the module-level ``tokenizer`` created after argument parsing below.

    :param example: mapping with a ``"content"`` text field
    :return: dict with ``input_ids`` and ``ratio_char_token``
    """
    output = {}
    # full sequences are kept: the pretokenization step deliberately disables truncation
    output['input_ids'] = tokenizer(example['content'], truncation=False)['input_ids']
    output['ratio_char_token'] = len(example['content']) / len(output['input_ids'])
    return output
# ---- Script driver: pre-tokenize a code dataset and push the result to the Hub ----
# NOTE(review): every assignment target below was mangled to `_UpperCAmelCase`,
# while later statements read `parser`, `args`, `tokenizer`, `t_start` and `ds`.
# As written these reads raise NameError; the intended targets are the names used
# at each read site -- confirm against the upstream codeparrot script.
_UpperCAmelCase : Dict = HfArgumentParser(PretokenizationArguments)
_UpperCAmelCase : List[Any] = parser.parse_args()
if args.num_workers is None:
    # default to one map worker per CPU core
    _UpperCAmelCase : Union[str, Any] = multiprocessing.cpu_count()
_UpperCAmelCase : int = AutoTokenizer.from_pretrained(args.tokenizer_dir)
_UpperCAmelCase : Optional[int] = time.time()
_UpperCAmelCase : List[str] = load_dataset(args.dataset_name, split="""train""")
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
_UpperCAmelCase : Tuple = time.time()
# tokenize in parallel and drop every raw-metadata column, keeping only token ids
_UpperCAmelCase : Union[str, Any] = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        """repo_name""",
        """path""",
        """copies""",
        """size""",
        """content""",
        """license""",
        """hash""",
        """line_mean""",
        """line_max""",
        """alpha_frac""",
        """autogenerated""",
    ],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
_UpperCAmelCase : Dict = time.time()
# upload the tokenized dataset to the Hub repository given on the command line
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 285 | 0 |
from ...configuration_utils import PretrainedConfig
# Canonical TAPAS checkpoints mapped to their hosted config.json files
# (SQA, WTQ, WikiSQL-supervised and TabFact fine-tuned variants).
lowercase_ = {
    """google/tapas-base-finetuned-sqa""": (
        """https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
    ),
    """google/tapas-base-finetuned-wtq""": (
        """https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
    ),
    """google/tapas-base-finetuned-wikisql-supervised""": (
        """https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
    ),
    """google/tapas-base-finetuned-tabfact""": (
        """https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
    ),
}
class __lowerCAmelCase ( lowercase_ ):
    """Configuration for TAPAS table-QA models.

    Fixes over the previous revision: the constructor declared every parameter
    with the single name ``lowerCAmelCase`` (a SyntaxError -- duplicate argument
    names); the parameter names below are reconstructed from the attribute each
    default was assigned to in the body. The aggregation-label normalization also
    called ``int(lowerCAmelCase)`` instead of ``int(k)`` on the dict keys.
    """

    # identifier used by the AutoConfig machinery
    model_type = '''tapas'''

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            # JSON round-trips stringify integer keys; normalize them back to int
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 205 |
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
if edge <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError('Length must be a positive.' )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
if edge <= 0 or not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError('Length must be a positive.' )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
    # run the doctests embedded in this module when executed directly
    import doctest

    doctest.testmod()
| 285 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration file.
a_ = logging.get_logger(__name__)
# Canonical MobileNetV2 checkpoints mapped to their hosted config.json files.
# NOTE(review): both assignments above/below target the same mangled name `a_`,
# so the second clobbers the first -- the intended names differ; confirm upstream.
a_ = {
    """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
    """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
    """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
    """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(lowercase_):
    """Configuration for MobileNetV2 models.

    Fixes over the previous revision: the constructor declared every parameter as
    ``__UpperCAmelCase`` (a SyntaxError -- duplicate argument names); the names
    below are reconstructed from the attribute each default was assigned to. The
    class was also named ``lowercase__``, identically to the ONNX config class
    following it, so one definition shadowed the other.
    """

    # identifier used by the AutoConfig machinery
    model_type = '''mobilenet_v2'''

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        # a non-positive multiplier would collapse every layer to zero channels
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(lowercase_):
    """ONNX export configuration for MobileNetV2.

    Fixes over the previous revision: the class shared the name ``lowercase__``
    with the model config above (shadowing it), and all three properties bore the
    single name ``UpperCAmelCase``, leaving only the last one reachable. They now
    use the names the transformers ONNX exporter calls (``inputs``, ``outputs``,
    ``atol_for_validation``).
    """

    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # single dynamic axis: the batch dimension of the pixel input
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        # classification heads expose logits; feature extractors expose the
        # encoder outputs instead
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        # absolute tolerance when comparing PyTorch and exported ONNX outputs
        return 1E-4
| 340 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class lowercase ( unittest.TestCase ):
    """Tests for ChineseCLIPProcessor: a fake checkpoint (minimal BERT vocab +
    CLIP-style image-processor config) is written to a temp dir, then save/load,
    tokenization, image processing and batch decoding are exercised.

    NOTE(review): obfuscation damage to be repaired before these tests can run --
    every method is named ``a`` so only the last definition survives; setup
    assignments target the throwaway local ``snake_case_`` while later code reads
    ``self.tmpdirname`` / ``self.vocab_file`` / ``self.image_processor_file``;
    ``np.uinta`` should presumably be ``np.uint8``; several calls pass the
    undefined name ``snake_case``. Confirm each against the upstream test file.
    """

    def a ( self ):
        # setUp: build the fake pretrained checkpoint directory
        snake_case_ = tempfile.mkdtemp()
        # minimal vocab covering the Chinese sample sentence used below
        snake_case_ = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            '的',
            '价',
            '格',
            '是',
            '15',
            '便',
            'alex',
            '##andra',
            ',',
            '。',
            '-',
            't',
            'shirt',
        ]
        snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        # CLIP-style image-processor settings, dumped as JSON next to the vocab
        snake_case_ = {
            'do_resize': True,
            'size': {'height': 224, 'width': 224},
            'do_center_crop': True,
            'crop_size': {'height': 18, 'width': 18},
            'do_normalize': True,
            'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
            'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
            'do_convert_rgb': True,
        }
        snake_case_ = os.path.join(self.tmpdirname , snake_case )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(snake_case , snake_case )

    def a ( self , **snake_case ):
        # helper: slow tokenizer loaded from the fake checkpoint
        return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case )

    def a ( self , **snake_case ):
        # helper: fast (Rust) tokenizer loaded from the fake checkpoint
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case )

    def a ( self , **snake_case ):
        # helper: image processor loaded from the fake checkpoint
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **snake_case )

    def a ( self ):
        # tearDown: remove the temp checkpoint directory
        shutil.rmtree(self.tmpdirname )

    def a ( self ):
        # helper: one random RGB image as a PIL object
        snake_case_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        snake_case_ = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def a ( self ):
        # round-trip: save slow/fast processors, reload with from_pretrained
        # and check vocabularies and image-processor configs are preserved
        snake_case_ = self.get_tokenizer()
        snake_case_ = self.get_rust_tokenizer()
        snake_case_ = self.get_image_processor()
        snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
        processor_slow.save_pretrained(self.tmpdirname )
        snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case )
        snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
        processor_fast.save_pretrained(self.tmpdirname )
        snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , snake_case )
        self.assertIsInstance(processor_fast.tokenizer , snake_case )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , snake_case )
        self.assertIsInstance(processor_fast.image_processor , snake_case )

    def a ( self ):
        # from_pretrained with extra kwargs should override saved components
        snake_case_ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        snake_case_ = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' )
        snake_case_ = self.get_image_processor(do_normalize=snake_case )
        snake_case_ = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=snake_case )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , snake_case )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , snake_case )

    def a ( self ):
        # processor(images=...) must match the bare image processor's output
        snake_case_ = self.get_image_processor()
        snake_case_ = self.get_tokenizer()
        snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
        snake_case_ = self.prepare_image_inputs()
        snake_case_ = image_processor(snake_case , return_tensors='np' )
        snake_case_ = processor(images=snake_case , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def a ( self ):
        # processor(text=...) must match the bare tokenizer's output
        snake_case_ = self.get_image_processor()
        snake_case_ = self.get_tokenizer()
        snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
        snake_case_ = 'Alexandra,T-shirt的价格是15便士。'
        snake_case_ = processor(text=snake_case )
        snake_case_ = tokenizer(snake_case )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def a ( self ):
        # text + images yields the combined key set; calling with neither raises
        snake_case_ = self.get_image_processor()
        snake_case_ = self.get_tokenizer()
        snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
        snake_case_ = 'Alexandra,T-shirt的价格是15便士。'
        snake_case_ = self.prepare_image_inputs()
        snake_case_ = processor(text=snake_case , images=snake_case )
        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(snake_case ):
            processor()

    def a ( self ):
        # batch_decode must delegate to the tokenizer's batch_decode
        snake_case_ = self.get_image_processor()
        snake_case_ = self.get_tokenizer()
        snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
        snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        snake_case_ = processor.batch_decode(snake_case )
        snake_case_ = tokenizer.batch_decode(snake_case )
        self.assertListEqual(snake_case , snake_case )

    def a ( self ):
        # output keys must follow the processor's declared model_input_names
        snake_case_ = self.get_image_processor()
        snake_case_ = self.get_tokenizer()
        snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
        snake_case_ = 'Alexandra,T-shirt的价格是15便士。'
        snake_case_ = self.prepare_image_inputs()
        snake_case_ = processor(text=snake_case , images=snake_case )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 285 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_snake_case : Tuple = logging.get_logger(__name__)
class a (lowercase_ ):
    """CLAP-style audio feature extractor: converts raw waveforms to log-mel
    spectrogram batches, with 'fusion' (4-channel stacked/shrunk mels) and
    'rand_trunc' truncation modes plus 'repeat'/'repeatpad' padding.

    NOTE(review): obfuscation damage to be repaired before this class can run --
    ``__init__`` and ``__call__`` declare every parameter as ``lowerCamelCase``
    (duplicate argument names are a SyntaxError); four methods share the name
    ``__snake_case`` so only the last binding survives, yet the bodies call
    ``self._np_extract_fbank_features`` / ``self._random_mel_fusion`` /
    ``self._get_input_mel``; local results are assigned to the throwaway
    ``__snake_case`` while later lines read real names (``mel``, ``longer``,
    ``raw_speech``, ...). ``np.floataa`` presumably means ``np.float64``.
    Confirm each point against the upstream ClapFeatureExtractor.
    """

    # names of the tensors this extractor produces
    __UpperCAmelCase : Optional[Any] = ['''input_features''', '''is_longer''']

    def __init__( self : int , lowerCamelCase : Tuple=64 , lowerCamelCase : str=48000 , lowerCamelCase : Dict=480 , lowerCamelCase : List[Any]=10 , lowerCamelCase : Dict=1024 , lowerCamelCase : Any=0.0 , lowerCamelCase : Optional[Any]=False , lowerCamelCase : List[str] = 0 , lowerCamelCase : Any = 14000 , lowerCamelCase : int = None , lowerCamelCase : Tuple = "fusion" , lowerCamelCase : List[Any] = "repeatpad" , **lowerCamelCase : Dict , ) -> List[str]:
        # forward the generic feature-extraction settings to the base class
        super().__init__(
            feature_size=lowerCamelCase , sampling_rate=lowerCamelCase , padding_value=lowerCamelCase , return_attention_mask=lowerCamelCase , **lowerCamelCase , )
        __snake_case : int = top_db
        __snake_case : Optional[int] = truncation
        __snake_case : List[Any] = padding
        __snake_case : int = fft_window_size
        # number of frequency bins of a real FFT of this window size
        __snake_case : Union[str, Any] = (fft_window_size >> 1) + 1
        __snake_case : str = hop_length
        __snake_case : Dict = max_length_s
        __snake_case : str = max_length_s * sampling_rate
        __snake_case : Tuple = sampling_rate
        __snake_case : Union[str, Any] = frequency_min
        __snake_case : List[str] = frequency_max
        # two mel filter banks are precomputed: HTK-scaled and Slaney-scaled
        __snake_case : int = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase , min_frequency=lowerCamelCase , max_frequency=lowerCamelCase , sampling_rate=lowerCamelCase , norm=lowerCamelCase , mel_scale="htk" , )
        __snake_case : Optional[int] = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase , min_frequency=lowerCamelCase , max_frequency=lowerCamelCase , sampling_rate=lowerCamelCase , norm="slaney" , mel_scale="slaney" , )

    def __snake_case ( self : Tuple ) -> int:
        # serialization helper: copy the instance dict but drop the large,
        # recomputable mel filter banks
        __snake_case : int = copy.deepcopy(self.__dict__ )
        __snake_case : Tuple = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def __snake_case ( self : Dict , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any] = None ) -> Dict:
        # waveform -> dB-scaled log-mel spectrogram, transposed to (frames, mels)
        __snake_case : Optional[Any] = spectrogram(
            lowerCamelCase , window_function(self.fft_window_size , "hann" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCamelCase , log_mel="dB" , )
        return log_mel_spectrogram.T

    def __snake_case ( self : Any , lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : Union[str, Any] ) -> Optional[int]:
        # 'fusion' helper: sample one chunk from the front/middle/back thirds of
        # the mel, plus a bilinear shrink of the whole mel, stacked on 4 channels
        __snake_case : List[str] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            __snake_case : Dict = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            __snake_case : Dict = [0]
        # randomly choose index for each part
        __snake_case : Union[str, Any] = np.random.choice(ranges[0] )
        __snake_case : Optional[Any] = np.random.choice(ranges[1] )
        __snake_case : Optional[Any] = np.random.choice(ranges[2] )
        __snake_case : List[str] = mel[idx_front : idx_front + chunk_frames, :]
        __snake_case : str = mel[idx_middle : idx_middle + chunk_frames, :]
        __snake_case : str = mel[idx_back : idx_back + chunk_frames, :]
        __snake_case : int = torch.tensor(mel[None, None, :] )
        __snake_case : List[str] = torch.nn.functional.interpolate(
            lowerCamelCase , size=[chunk_frames, 64] , mode="bilinear" , align_corners=lowerCamelCase )
        __snake_case : Union[str, Any] = mel_shrink[0][0].numpy()
        __snake_case : List[str] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion

    def __snake_case ( self : Any , lowerCamelCase : Optional[int] , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : Any ) -> Tuple:
        # per-waveform preprocessing: truncate (rand_trunc/fusion) when too long,
        # pad (repeat/repeatpad) when too short; returns (mel, is_longer_flag)
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                __snake_case : int = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                __snake_case : List[str] = len(lowerCamelCase ) - max_length
                __snake_case : Any = np.random.randint(0 , overflow + 1 )
                __snake_case : List[Any] = waveform[idx : idx + max_length]
                __snake_case : Union[str, Any] = self._np_extract_fbank_features(lowerCamelCase , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                __snake_case : Tuple = self._np_extract_fbank_features(lowerCamelCase , self.mel_filters )
                __snake_case : Union[str, Any] = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                __snake_case : Tuple = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    __snake_case : Tuple = np.stack([mel, mel, mel, mel] , axis=0 )
                    __snake_case : Optional[Any] = False
                else:
                    __snake_case : List[str] = self._random_mel_fusion(lowerCamelCase , lowerCamelCase , lowerCamelCase )
                    __snake_case : Optional[int] = True
            else:
                raise NotImplementedError(F'data_truncating {truncation} not implemented' )
        else:
            __snake_case : Dict = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    __snake_case : Dict = int(max_length / len(lowerCamelCase ) )
                    __snake_case : Union[str, Any] = np.stack(np.tile(lowerCamelCase , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    __snake_case : Tuple = int(max_length / len(lowerCamelCase ) )
                    __snake_case : Any = np.stack(np.tile(lowerCamelCase , lowerCamelCase ) )
                __snake_case : Optional[int] = np.pad(lowerCamelCase , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0 )
            if truncation == "fusion":
                __snake_case : Optional[int] = self._np_extract_fbank_features(lowerCamelCase , self.mel_filters )
                __snake_case : Optional[int] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                __snake_case : int = self._np_extract_fbank_features(lowerCamelCase , self.mel_filters_slaney )[None, :]
        return input_mel, longer

    def __call__( self : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] = None , lowerCamelCase : Union[str, Any] = None , lowerCamelCase : Union[str, Any] = None , lowerCamelCase : List[Any] = None , lowerCamelCase : Tuple = None , **lowerCamelCase : Any , ) -> int:
        # entry point: normalize raw speech to a batch of float arrays, extract
        # per-waveform mels, and package everything as a BatchFeature
        __snake_case : List[Any] = truncation if truncation is not None else self.truncation
        __snake_case : str = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        # a 2-D numpy input is treated as an already-batched set of mono clips
        __snake_case : int = isinstance(lowerCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
        __snake_case : Optional[int] = is_batched_numpy or (
            isinstance(lowerCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            __snake_case : List[Any] = [np.asarray(lowerCamelCase , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(lowerCamelCase , np.ndarray ):
            __snake_case : Optional[Any] = np.asarray(lowerCamelCase , dtype=np.floataa )
        elif isinstance(lowerCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            __snake_case : Tuple = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            __snake_case : List[str] = [np.asarray(lowerCamelCase )]
        # convert to mel spectrogram, truncate and pad if needed.
        __snake_case : Optional[int] = [
            self._get_input_mel(lowerCamelCase , max_length if max_length else self.nb_max_samples , lowerCamelCase , lowerCamelCase )
            for waveform in raw_speech
        ]
        __snake_case : Tuple = []
        __snake_case : Any = []
        for mel, longer in padded_inputs:
            input_mel.append(lowerCamelCase )
            is_longer.append(lowerCamelCase )
        if truncation == "fusion" and sum(lowerCamelCase ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            __snake_case : List[str] = np.random.randint(0 , len(lowerCamelCase ) )
            __snake_case : Dict = True
        if isinstance(input_mel[0] , lowerCamelCase ):
            __snake_case : List[Any] = [np.asarray(lowerCamelCase , dtype=np.floataa ) for feature in input_mel]
        # is_longer is a list of bool
        __snake_case : Any = [[longer] for longer in is_longer]
        __snake_case : List[Any] = {"input_features": input_mel, "is_longer": is_longer}
        __snake_case : int = BatchFeature(lowerCamelCase )
        if return_tensors is not None:
            __snake_case : List[str] = input_features.convert_to_tensors(lowerCamelCase )
        return input_features
| 123 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase(ABC):
    """Abstract base class for CLI subcommands.

    Fixes over the previous revision: both abstract methods were named ``a``, so
    the second definition replaced the first and the registration hook vanished;
    they now carry distinct, conventional names. The base class was the undefined
    name ``lowercase_`` while the imported ``ABC`` went unused; ``ABC`` is now the
    base so @abstractmethod is actually enforced.
    """

    @staticmethod
    @abstractmethod
    def register_subcommand(parser):
        """Attach this command's subparser and arguments to ``parser``."""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """Execute the command."""
        raise NotImplementedError()
| 285 | 0 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files" , [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ] , )
def _snake_case(files, tmp_path_factory):
    """Check DatasetInfosDict.from_directory reads dataset_size from README.md
    YAML front matter and/or the legacy dataset_infos.json.

    Fixes over the previous revision: both parameters were declared with the same
    identifier (a SyntaxError) -- the names below are the ones the body reads --
    and the final DatasetInfosDict was assigned to a throwaway temporary while the
    asserts read ``dataset_infos``.
    """
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md" , "w" ) as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---" )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md" , "w" ) as f:
            f.write("" )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json" , "w" ) as f:
            f.write("{\"default\": {\"dataset_size\": 42}}" )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    "dataset_info" , [
        DatasetInfo(),
        DatasetInfo(
            description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ),
    ] , )
def _snake_case(dataset_info, tmp_path):
    """Round-trip a DatasetInfo through write_to_directory / from_directory.

    Fixes over the previous revision: both parameters were declared with the same
    identifier (a SyntaxError), and the reloaded info was assigned to a throwaway
    temporary while the assert reads ``reloaded``.
    """
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    # from_directory must have produced/read the canonical JSON file
    assert os.path.exists(os.path.join(tmp_path , "dataset_info.json" ) )
def _snake_case():
    """Check DatasetInfo._to_yaml_dict keeps exactly the YAML-safe fields and
    survives a yaml dump/load round trip.

    Fix over the previous revision: every intermediate was assigned to one reused
    temporary, so the names the asserts read (``dataset_info_yaml_dict``,
    ``reloaded``, ...) were never bound; each value now gets the name its readers
    use.
    """
    dataset_info = DatasetInfo(
        description="foo" , citation="bar" , homepage="https://foo.bar" , license="CC0" , features=Features({"a": Value("int32" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train", "num_examples": 42}] , download_checksums={} , download_size=1_337 , post_processing_size=442 , dataset_size=1_234 , size_in_bytes=1_337 + 442 + 1_234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        # only YAML-representable primitive/collection types may appear
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def _snake_case():
    """A default-constructed DatasetInfo must serialize to an empty YAML dict.

    Fix over the previous revision: both assignments targeted one reused
    temporary, so ``dataset_info_yaml_dict`` was unbound at the assert.
    """
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    "dataset_infos_dict" , [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()} ),
        DatasetInfosDict({"my_config_name": DatasetInfo()} ),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , )
            } ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42 ),
                "v2": DatasetInfo(dataset_size=1_337 ),
            } ),
    ] , )
def _snake_case(dataset_infos_dict, tmp_path):
    """Round-trip a DatasetInfosDict through write_to_directory / from_directory.

    Fixes over the previous revision: both parameters shared one identifier (a
    SyntaxError) and the in-loop assignments targeted throwaway temporaries
    instead of ``dataset_info.config_name`` / ``dataset_infos_dict[config_name]``.
    """
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path , "README.md" ) )
| 144 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase : List[Any] = logging.get_logger()
@dataclass
class Tracker:
    """Records the leaf modules a forward pass actually flows through.

    Fixes over the previous revision: the hook declared three identically-named
    parameters (a SyntaxError); the three fields all bore one name; ``__call__``
    referenced ``self._forward_hook`` and callers read ``.parametrized`` and
    ``.traced`` although the methods/fields were named otherwise; and
    ``nn.Convad`` / ``nn.BatchNormad`` are not torch attributes (``Conv2d`` /
    ``BatchNorm2d``).
    """

    # module whose forward pass is traced
    module: nn.Module
    # leaf modules seen during the traced forward pass, in execution order
    traced: List[nn.Module] = field(default_factory=list)
    # forward-hook handles, removed again after the pass
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs, outputs):
        # record only leaves (no submodules); convs/batchnorms are always kept
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x):
        """Run one forward pass on ``x`` with hooks installed; returns self."""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # detach every hook once `traced` has been populated
        [h.remove() for h in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda m: len(list(m.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """Copies weights from ``src`` to ``dest`` by pairing up the parametrized leaf
    modules each one executes on the same input.

    Fixes over the previous revision: the five fields all shared one mangled name
    although the body reads ``self.src`` / ``self.dest`` / ``self.src_skip`` /
    ``self.dest_skip`` / ``self.verbose``, and the class name did not match the
    ``ModuleTransfer(...)`` call site in this module.
    """

    src: nn.Module
    dest: nn.Module
    # 1 -> print each transferred (src, dest) module pair
    verbose: int = 0
    # module types to ignore on either side when pairing operations
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x):
        """Trace both models on ``x`` and load src weights into dest, pairwise.

        :raises Exception: when the two models execute a different number of
            (non-skipped) parametrized operations.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f'''Numbers of operations are different. Source module has {len(src_traced)} operations while'''
                f''' destination module has {len(dest_traced)}.''' )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f'''Transfered from={src_m} to={dest_m}''')
def convert_weight_and_push(name, config, save_directory, push_to_hub=True):
    """Convert one timm ResNet checkpoint into the transformers format and
    optionally push model + image processor to the Hub.

    Fixes over the previous revision: all four parameters shared one identifier
    (a SyntaxError), and the definition was named ``__lowerCamelCase`` while the
    driver below calls ``convert_weight_and_push``.

    :param name: timm model name, e.g. ``"resnet50"``
    :param config: ResNetConfig for the destination model
    :param save_directory: Path under which Hub repos are created
    :param push_to_hub: when True, upload model and image processor
    """
    print(f'''Converting {name}...''')
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    # sanity check: transferred weights must reproduce the timm logits exactly
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
    checkpoint_name = f'''resnet{"-".join(name.split("resnet" ) )}'''
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=True,
        )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=True,
        )
        print(f'''Pushed {checkpoint_name}''')
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    """Convert one (or, when ``model_name`` is None, all) supported timm ResNet
    checkpoints to HF format and optionally push them.

    Returns ``(config, expected_shape)`` for the last converted model.

    Fixes: the mangled body referenced ``num_labels``/``idalabel`` that were
    never bound, cast the wrong comprehension variable to ``int`` when building
    the id2label map, and left ``config`` unbound on the single-model path.
    """
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    # Keys arrive as JSON strings; they must be int label ids.
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    # NOTE(review): config factory reconstructed as ResNetConfig — confirm the
    # import at the top of this file.
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
        'resnet18': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic'),
        'resnet26': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet34': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic'),
        'resnet50': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet101': ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet152': ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
    }
    if model_name:
        # Bind config here so the final return is defined on this path too.
        config = names_to_config[model_name]
        convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    # Fix: the mangled script assigned the parser and parsed args to one
    # placeholder name, leaving the later `parser.`/`args.` references undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default=None,
        type=str,
        help=(
            """The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
            """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=Path,
        required=True,
        help="""Path to the output PyTorch model directory.""",
    )
    # NOTE(review): `type=bool` means any non-empty string (even "False") parses
    # truthy — consider `action="store_true"` in a follow-up CLI change.
    parser.add_argument(
        """--push_to_hub""",
        default=True,
        type=bool,
        required=False,
        help="""If True, push model and image processor to the hub.""",
    )
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
# (extraction artifact: stray dataset table-row "| 285 | 0 |" commented out)
import math
def prime_sieve(n):
    """Return all primes strictly below ``n`` (requires ``n`` >= 3).

    Odd-only sieve of Eratosthenes: only odd composites are marked, and only
    odd indices are read afterwards, so even slots can stay stale.

    Fix: the mangled version collapsed ``is_prime``/``index``/``primes`` into
    one placeholder, so the sieve array was never written and the collection
    loop referenced undefined names. Both functions in this section also shared
    one mangled def name while their call sites used ``prime_sieve`` and
    ``solution``; the real names are restored.
    """
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        # first odd multiple of i above i itself is 2*i... step i hits 2i, 3i, ...
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


# Backward-compatible alias for the previous (mangled) public name.
lowerCAmelCase__ = prime_sieve


def solution(limit=9_9_9_9_6_6_6_6_3_3_3_3):
    """Project Euler 234: sum of all "semidivisible" numbers not exceeding ``limit``.

    For consecutive primes p < q, numbers n with p**2 < n < q**2 have
    lps(n) = p and ups(n) = q; n is semidivisible when exactly one of p, q
    divides it.

    >>> solution(15)
    30
    >>> solution(30)
    171
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 1_0_0
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum


if __name__ == "__main__":
    print(solution())
# (extraction artifact: stray dataset table-row "| 68 |" commented out)
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
# Fix: the constants were all assigned to one mangled placeholder while the
# benchmark functions below read SPEED_TEST_N_EXAMPLES / SMALL_TEST /
# RESULTS_FILE_PATH — restore the names the code actually uses.
SPEED_TEST_N_EXAMPLES = 5_0000  # rows touched by the full-speed read benchmarks
SMALL_TEST = 5000  # rows touched by the formatted-read benchmarks
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def read(dataset, length):
    """Time sequential single-example access of the first ``length`` rows.

    Fix: both parameters shared one mangled name (a SyntaxError) and the body
    referenced an undefined ``dataset``; the name ``read`` is what
    ``benchmark_iterating`` registers.
    """
    for i in range(length):
        _ = dataset[i]
@get_duration
def read_batch(dataset, length, batch_size):
    """Time slice-based batched access over the whole dataset.

    ``length`` is unused here but kept so all benchmark entries share a uniform
    kwargs shape. Fix: mangled duplicate parameter names (SyntaxError) and
    the dangling ``read_batch`` reference in ``benchmark_iterating``.
    """
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted(dataset, length, type):
    """Time single-example access under an output format (numpy/pandas/torch/...).

    ``type`` shadows the builtin but must keep that name: callers pass it as a
    keyword ({"type": "numpy", ...}). Fix: mangled duplicate parameter names.
    """
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]
@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    """Time batched access under an output format.

    ``type`` must keep its (builtin-shadowing) name — it is passed by keyword
    from the benchmark tables. Fix: mangled duplicate parameter names.
    """
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    """Generate a synthetic Arrow dataset, time every registered read pattern
    (before and after shuffling) and dump the timings to RESULTS_FILE_PATH.

    Fix: the mangled body funnelled ``times``/``functions``/``dataset`` through
    one placeholder name, so every later reference was undefined, and the def
    name itself no longer matched the ``benchmark_iterating()`` call below.
    NOTE(review): the timing-dict key format is reconstructed — confirm against
    the original benchmark script.
    """
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {'length': SMALL_TEST}),
        (read, {'length': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}),
        (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}),
    ]
    functions_shuffled = [
        (read, {'length': SMALL_TEST}),
        (read, {'length': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}),
        (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('generating dataset')
        features = datasets.Features(
            {'list': datasets.Sequence(datasets.Value('float32')), 'numbers': datasets.Value('float32')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, 'dataset.arrow'), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={'list': (100,)}, )
        print('first set of iterations')
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print('shuffling dataset')
        dataset = dataset.shuffle()
        print('Second set of iterations (after shuffling')
        for func, kwargs in functions_shuffled:
            print('shuffled ', func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs)
    with open(RESULTS_FILE_PATH, 'wb') as f:
        f.write(json.dumps(times).encode('utf-8'))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
# (extraction artifact: stray dataset table-row "| 285 | 0 |" commented out)
from __future__ import annotations
import time
import numpy as np
# --- Sample data for the Banker's algorithm class below ---
# NOTE(review): all three constants were mangled onto the single name
# `snake_case`, so each assignment shadows the previous one and nothing in
# the visible code reads them. They were presumably distinct fixture names
# (claim vector / allocated-resources table / maximum-claim table) —
# confirm against the original module before relying on them.
snake_case : Union[str, Any] = [8, 5, 9, 7]
snake_case : Dict = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
snake_case : Optional[Any] = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class _snake_case:
    """Banker's algorithm deadlock-avoidance simulation.

    Fixes: the mangled ``__init__`` collapsed the three constructor parameters
    into one name and dropped the ``self.`` attribute assignments, and all five
    methods shared a single mangled name so only the last survived. Private
    method names are restored from the ``self.__...`` call sites inside the
    class; the public entry point is restored as ``main``
    (NOTE(review): confirm that name against the original module).
    """

    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self):
        # Column-wise totals of everything currently allocated.
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        # Claim vector minus the allocation totals.
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self):
        # Per-process remaining need: maximum claim minus current allocation.
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self):
        # Map each need row's position back to the row itself so original
        # process ids survive removals during the simulation.
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs):
        """Run the safety simulation; any truthy keyword pretty-prints state first."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f'''Process {process_number + 1} is executing.''')
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources]))
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        # Human-readable dump of the allocation / claim tables and free resources.
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f'''P{self.__allocated_resources_table.index(item) + 1}'''
                + " ".join(f'''{it:>8}''' for it in item)
                + "\n")
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f'''P{self.__maximum_claim_table.index(item) + 1}'''
                + " ".join(f'''{it:>8}''' for it in item)
                + "\n")
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector))
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources()))
        time.sleep(1)
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
# (extraction artifact: stray dataset table-row "| 281 |" commented out)
def mf_knapsack(i, wt, val, j):
    """Memoized 0/1 knapsack: best value using the first ``i`` items with
    remaining capacity ``j``.

    Requires a module-level table ``f`` (row 0 zeros, other cells -1 for
    "unknown"), as set up in the ``__main__`` demo below.

    Fixes: the mangled version reused one name for all four parameters and
    never wrote the memo cell, so ``f[i][j]`` stayed -1 and memoization (and
    the return value) was broken. Renamed to the name the recursion and the
    demo actually call.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            # item i doesn't fit: value is whatever the first i-1 items give
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]
def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack.

    Returns ``(optimal_value, dp)`` where ``dp[i][j]`` is the best value using
    the first ``i`` items with capacity ``j``.

    Fixes: parameters were collapsed into one mangled name, and the return
    read ``dp[n][w_]`` through the leaked loop variable — a NameError when
    ``w == 0``; it now reads ``dp[n][w]`` directly.
    """
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w], dp
def knapsack_with_example_solution(w, wt, val):
    """Solve 0/1 knapsack and also return one optimal item subset (1-based ids).

    Raises ValueError for non-sequence inputs or mismatched lengths and
    TypeError for non-integer weights.

    Fix: the mangled signature reused one name for all three parameters, so
    every body reference (``wt``, ``val``, ``num_items``) was undefined, and
    the def name no longer matched the ``__main__`` call site.
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples')
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            f'''But got {num_items} weights and {len(val)} values'''
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                'All weights must be integers but got weight of '
                f'''type {type(wt[i])} at index {i}'''
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp, wt, i, j, optimal_set):
    """Walk the dp table backwards, adding each taken item's 1-based index to
    ``optimal_set`` (mutated in place).

    Fix: the mangled signature reused one name for all five parameters, so the
    recursion and the table lookups referenced undefined names; the def name is
    restored to match its recursive call and its caller.
    """
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            # item i was not taken — same value without it
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    # Demo fixture: values first, weights second — the optimal subset is
    # items {3, 4} (weights 2 + 3 <= 6, value 4 + 4 = 8).
    # Fix: all of these were assigned to one mangled placeholder, leaving
    # w/wt/val/n/f and the result names undefined below.
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    # memo table for mf_knapsack: row 0 all zeros, remaining cells -1 ("unknown")
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("""optimal_value = """, optimal_solution)
    print("""An optimal subset corresponding to the optimal value""", optimal_subset)
# (extraction artifact: stray dataset table-row "| 285 | 0 |" commented out)
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_(metaclass=DummyObject):
    """Import-guard placeholder: every entry point raises a clear error when
    the optional ``flax``/``transformers`` backends are missing.

    Fixes: the metaclass was a mangled dangling name (``DummyObject`` is what
    this file imports), the ``_backends`` attribute the metaclass reads was
    mangled away, and both classmethods shared one mangled name so the first
    was silently shadowed.
    """

    _backends = ['''flax''', '''transformers''']
    snake_case__ = ['''flax''', '''transformers''']  # previous (mangled) attribute, kept for compatibility

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''flax''', '''transformers'''])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''flax''', '''transformers'''])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''flax''', '''transformers'''])

    # Backward-compatible alias for the previous (mangled) classmethod name.
    _UpperCamelCase = from_pretrained
class UpperCAmelCase_(metaclass=DummyObject):
    """Import-guard placeholder raising via ``requires_backends`` when the
    ``flax``/``transformers`` backends are unavailable.

    Same fixes as its siblings: restore the ``DummyObject`` metaclass (per the
    import above), the ``_backends`` attribute, and the two standard
    classmethod entry points that previously shared one mangled name.
    """

    _backends = ['''flax''', '''transformers''']
    snake_case__ = ['''flax''', '''transformers''']  # previous (mangled) attribute, kept for compatibility

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''flax''', '''transformers'''])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''flax''', '''transformers'''])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''flax''', '''transformers'''])

    # Backward-compatible alias for the previous (mangled) classmethod name.
    _UpperCamelCase = from_pretrained
class UpperCAmelCase_(metaclass=DummyObject):
    """Import-guard placeholder raising via ``requires_backends`` when the
    ``flax``/``transformers`` backends are unavailable.

    Same fixes as its siblings: ``DummyObject`` metaclass restored (per the
    import above), ``_backends`` attribute restored, and the duplicate
    classmethod name replaced by the two standard entry points.
    """

    _backends = ['''flax''', '''transformers''']
    snake_case__ = ['''flax''', '''transformers''']  # previous (mangled) attribute, kept for compatibility

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''flax''', '''transformers'''])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''flax''', '''transformers'''])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''flax''', '''transformers'''])

    # Backward-compatible alias for the previous (mangled) classmethod name.
    _UpperCamelCase = from_pretrained
class UpperCAmelCase_(metaclass=DummyObject):
    """Import-guard placeholder raising via ``requires_backends`` when the
    ``flax``/``transformers`` backends are unavailable.

    Same fixes as its siblings: ``DummyObject`` metaclass restored (per the
    import above), ``_backends`` attribute restored, and the duplicate
    classmethod name replaced by the two standard entry points.
    """

    _backends = ['''flax''', '''transformers''']
    snake_case__ = ['''flax''', '''transformers''']  # previous (mangled) attribute, kept for compatibility

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''flax''', '''transformers'''])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''flax''', '''transformers'''])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''flax''', '''transformers'''])

    # Backward-compatible alias for the previous (mangled) classmethod name.
    _UpperCamelCase = from_pretrained
# (extraction artifact: stray dataset table-row "| 256 |" commented out)
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
# Module-level logger; the feature-extractor class below calls
# ``logger.warning`` when no sampling_rate is passed, so the mangled
# placeholder name broke that reference.
logger = logging.get_logger(__name__)
_UpperCAmelCase = logger  # previous (mangled) name, kept for compatibility
class lowercase(SequenceFeatureExtractor):
    r"""
    CLAP-style audio feature extractor: converts raw mono audio into
    log-mel-spectrogram ``input_features`` plus a per-example ``is_longer``
    flag used by the model's feature-fusion branch.

    Fixes applied to the mangled original: the base class was a dangling
    placeholder (restored to the ``SequenceFeatureExtractor`` imported above),
    ``__init__``/``__call__`` reused one name for every parameter (a
    SyntaxError), the ``self.`` attribute assignments in ``__init__`` were
    dropped, and all four helper methods shared the name ``a`` — their real
    names are restored from the ``self._np_extract_fbank_features`` /
    ``self._random_mel_fusion`` / ``self._get_input_mel`` call sites.
    """

    model_input_names = ['input_features', 'is_longer']

    def __init__(
        self,
        feature_size=64,
        sampling_rate=4_8000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min=0,
        frequency_max=1_4000,
        top_db=None,
        truncation="fusion",
        padding="repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        # real-FFT bin count: fft_window_size // 2 + 1
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # HTK-scaled filters feed the "fusion" path, Slaney-scaled the rest.
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale='htk',
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm='slaney',
            mel_scale='slaney',
        )

    def to_dict(self):
        """Serialize the config, dropping the large recomputable filter banks."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform, mel_filters=None):
        """Return a (frames, n_mels) dB-scaled log-mel spectrogram."""
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, 'hann'),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel='dB',
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Build the 4-channel fusion input: a shrunk global view + 3 random chunks."""
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :])
        # NOTE(review): align_corners value reconstructed as False — confirm.
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode='bilinear', align_corners=False)
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform, max_length, truncation, padding):
        """Pad or truncate one waveform and return ``(mel_features, longer_flag)``."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f'''data_truncating {truncation} not implemented''')
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode='constant', constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer

    def __call__(
        self,
        raw_speech,
        truncation=None,
        padding=None,
        max_length=None,
        sampling_rate=None,
        return_tensors=None,
        **kwargs,
    ):
        """Featurize a waveform (or batch of waveforms) into a ``BatchFeature``."""
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
                    f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
                    f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
            else:
                pass
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        # NOTE(review): the mangled dtype token `np.floataa` is reconstructed as
        # float64 throughout — confirm against the original extractor.
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'input_features': input_mel, 'is_longer': is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
# (extraction artifact: stray dataset table-row "| 285 | 0 |" commented out)
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    """Return True if every tensor in ``tensor_list`` shares one shape
    (vacuously True for fewer than two tensors).

    Fix: the mangled parameter name left the body's ``tensor_list`` reference
    undefined, and the def name no longer matched the ``check_same_shape``
    call inside the test class below.
    """
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])


# Backward-compatible alias for the previous (mangled) name.
snake_case_ = check_same_shape
# NOTE(review): this class is heavily name-mangled. The three `lowercase_`
# bases are dangling names (presumably the PipelineLatentTesterMixin /
# PipelineKarrasSchedulerTesterMixin / PipelineTesterMixin imported above);
# every method shares the one name `UpperCAmelCase_`, so earlier definitions
# are shadowed and pytest discovers no `test_*` methods; and one signature
# below repeats a parameter name, which is a SyntaxError. Left byte-identical
# pending recovery of the original identifiers.
class lowercase__ ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
    '''Fast CPU tests for StableDiffusionLatentUpscalePipeline built from tiny dummy components.'''
    # Tester-mixin configuration (original attribute names lost to mangling).
    A_ : Union[str, Any] = StableDiffusionLatentUpscalePipeline
    A_ : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        '''height''',
        '''width''',
        '''cross_attention_kwargs''',
        '''negative_prompt_embeds''',
        '''prompt_embeds''',
    }
    A_ : Dict = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''}
    A_ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    A_ : Optional[int] = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    A_ : Any = frozenset([] )
    A_ : Any = True
    # NOTE(review): locals below were collapsed by the mangler —
    # batch_size/num_channels/sizes/image and `__snake_case` are undefined here.
    @property
    def UpperCAmelCase_ ( self ):
        _SCREAMING_SNAKE_CASE : str = 1
        _SCREAMING_SNAKE_CASE : Dict = 4
        _SCREAMING_SNAKE_CASE : str = (16, 16)
        _SCREAMING_SNAKE_CASE : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__snake_case )
        return image
    # Builds the tiny dummy unet/vae/scheduler/text-encoder component set.
    def UpperCAmelCase_ ( self ):
        torch.manual_seed(0 )
        _SCREAMING_SNAKE_CASE : Tuple = UNetaDConditionModel(
            act_fn="""gelu""" , attention_head_dim=8 , norm_num_groups=__snake_case , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
            """KDownBlock2D""",
            """KCrossAttnDownBlock2D""",
            """KCrossAttnDownBlock2D""",
            """KCrossAttnDownBlock2D""",
        ) , in_channels=8 , mid_block_type=__snake_case , only_cross_attention=__snake_case , out_channels=5 , resnet_time_scale_shift="""scale_shift""" , time_embedding_type="""fourier""" , timestep_post_act="""gelu""" , up_block_types=("""KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KCrossAttnUpBlock2D""", """KUpBlock2D""") , )
        _SCREAMING_SNAKE_CASE : int = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
            """DownEncoderBlock2D""",
            """DownEncoderBlock2D""",
            """DownEncoderBlock2D""",
            """DownEncoderBlock2D""",
        ] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        _SCREAMING_SNAKE_CASE : Optional[Any] = EulerDiscreteScheduler(prediction_type="""sample""" )
        _SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""quick_gelu""" , projection_dim=512 , )
        _SCREAMING_SNAKE_CASE : List[str] = CLIPTextModel(__snake_case )
        _SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        _SCREAMING_SNAKE_CASE : Any = {
            """unet""": model.eval(),
            """vae""": vae.eval(),
            """scheduler""": scheduler,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
        }
        return components
    # NOTE(review): duplicate parameter name `__snake_case` is a SyntaxError —
    # the original signature was almost certainly `(self, device, seed=0)`.
    def UpperCAmelCase_ ( self , __snake_case , __snake_case=0 ):
        if str(__snake_case ).startswith("""mps""" ):
            _SCREAMING_SNAKE_CASE : str = torch.manual_seed(__snake_case )
        else:
            _SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
        _SCREAMING_SNAKE_CASE : Any = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": self.dummy_image.cpu(),
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
    # End-to-end CPU inference check against a hard-coded image slice.
    def UpperCAmelCase_ ( self ):
        _SCREAMING_SNAKE_CASE : List[str] = """cpu"""
        _SCREAMING_SNAKE_CASE : int = self.get_dummy_components()
        _SCREAMING_SNAKE_CASE : int = self.pipeline_class(**__snake_case )
        pipe.to(__snake_case )
        pipe.set_progress_bar_config(disable=__snake_case )
        _SCREAMING_SNAKE_CASE : int = self.get_dummy_inputs(__snake_case )
        _SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**__snake_case ).images
        _SCREAMING_SNAKE_CASE : Optional[int] = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 256, 256, 3) )
        _SCREAMING_SNAKE_CASE : Tuple = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
        _SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(__snake_case , 1e-3 )
    # Mixin-test delegates with loosened tolerances (original names visible in
    # the super() calls, but the defs themselves are mangled).
    def UpperCAmelCase_ ( self ):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
    def UpperCAmelCase_ ( self ):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
    def UpperCAmelCase_ ( self ):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def UpperCAmelCase_ ( self ):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
    def UpperCAmelCase_ ( self ):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
    def UpperCAmelCase_ ( self ):
        super().test_save_load_local(expected_max_difference=3e-3 )
    def UpperCAmelCase_ ( self ):
        super().test_save_load_optional_components(expected_max_difference=3e-3 )
    # Runs the pipeline under every supported Karras scheduler and checks all
    # outputs share one shape.
    def UpperCAmelCase_ ( self ):
        _SCREAMING_SNAKE_CASE : List[str] = [
            """DDIMScheduler""",
            """DDPMScheduler""",
            """PNDMScheduler""",
            """HeunDiscreteScheduler""",
            """EulerAncestralDiscreteScheduler""",
            """KDPM2DiscreteScheduler""",
            """KDPM2AncestralDiscreteScheduler""",
            """DPMSolverSDEScheduler""",
        ]
        _SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
        _SCREAMING_SNAKE_CASE : int = self.pipeline_class(**__snake_case )
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=__snake_case )
        pipe.to(__snake_case )
        pipe.set_progress_bar_config(disable=__snake_case )
        _SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(__snake_case )
        _SCREAMING_SNAKE_CASE : Tuple = 2
        _SCREAMING_SNAKE_CASE : List[Any] = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            _SCREAMING_SNAKE_CASE : Any = getattr(__snake_case , scheduler_enum.name )
            _SCREAMING_SNAKE_CASE : List[str] = scheduler_cls.from_config(pipe.scheduler.config )
            _SCREAMING_SNAKE_CASE : Any = pipe(**__snake_case )[0]
            outputs.append(__snake_case )
        assert check_same_shape(__snake_case )
@require_torch_gpu
@slow
class lowercase__ ( unittest.TestCase ):
    '''simple docstring'''

    # Slow GPU integration tests for the SD x2 latent upscaler.
    # NOTE(review): every local collapsed onto ``_SCREAMING_SNAKE_CASE`` and the
    # class-private ``__snake_case`` arguments are mangled and defined nowhere,
    # while later lines read the lost originals (``pipe``, ``upscaler``,
    # ``expected_image``, ``image``) — restore names from upstream before running.

    def UpperCAmelCase_ ( self ):
        # Free CUDA memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase_ ( self ):
        # txt2img latents -> latent upscaler -> compare against a stored reference.
        _SCREAMING_SNAKE_CASE : int = torch.manual_seed(33 )
        _SCREAMING_SNAKE_CASE : Any = StableDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" , torch_dtype=torch.floataa )
        pipe.to("""cuda""" )
        _SCREAMING_SNAKE_CASE : Any = StableDiffusionLatentUpscalePipeline.from_pretrained(
            """stabilityai/sd-x2-latent-upscaler""" , torch_dtype=torch.floataa )
        upscaler.to("""cuda""" )
        _SCREAMING_SNAKE_CASE : str = """a photo of an astronaut high resolution, unreal engine, ultra realistic"""
        _SCREAMING_SNAKE_CASE : Any = pipe(__snake_case , generator=__snake_case , output_type="""latent""" ).images
        _SCREAMING_SNAKE_CASE : Dict = upscaler(
            prompt=__snake_case , image=__snake_case , num_inference_steps=20 , guidance_scale=0 , generator=__snake_case , output_type="""np""" , ).images[0]
        _SCREAMING_SNAKE_CASE : Optional[int] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy""" )
        assert np.abs((expected_image - image).mean() ) < 5e-2

    def UpperCAmelCase_ ( self ):
        # img2img upscale of a 512px reference against a 1024px expected output.
        _SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(33 )
        _SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
            """stabilityai/sd-x2-latent-upscaler""" , torch_dtype=torch.floataa )
        upscaler.to("""cuda""" )
        _SCREAMING_SNAKE_CASE : Tuple = """the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"""
        _SCREAMING_SNAKE_CASE : Dict = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png""" )
        _SCREAMING_SNAKE_CASE : Tuple = upscaler(
            prompt=__snake_case , image=__snake_case , num_inference_steps=20 , guidance_scale=0 , generator=__snake_case , output_type="""np""" , ).images[0]
        _SCREAMING_SNAKE_CASE : Dict = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy""" )
        assert np.abs((expected_image - image).max() ) < 5e-2
| 200 |
import os
import numpy
import onnx
def __lowerCamelCase ( a , b ):
    """Compare two ONNX TensorProtos for equality while ignoring their names.

    Temporarily blanks both ``name`` fields, compares the protos, then restores
    the original names so the inputs are left unmodified.
    """
    # Fixes the obfuscated original: the signature had two parameters both
    # named ``UpperCamelCase__`` (a SyntaxError) while the body used ``a``/``b``,
    # and the saved names were never actually written to / read back.
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    # With names blanked, equality covers only the tensor payload.
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def __lowerCamelCase ( node_proto , name , new_name ):
    """Replace every input of ``node_proto`` equal to ``name`` with ``new_name``.

    Recurses into the subgraphs of ``If`` (both branches) and ``Loop`` (body)
    nodes so nested references are rewritten as well.
    """
    # Fixes the obfuscated original: three parameters all named
    # ``UpperCamelCase__`` (a SyntaxError) while the body used
    # ``node_proto``/``name``; the intended names are restored.
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            # Splice in place: insert the replacement, then drop the old entry.
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def __lowerCamelCase ( graph_proto , name , new_name ):
    """Rewrite input ``name`` to ``new_name`` on every node of ``graph_proto``."""
    # Fixes the obfuscated original: three parameters all named
    # ``UpperCamelCase__`` (a SyntaxError); the names used here mirror the
    # helper's evident call convention.
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def __lowerCamelCase ( model , model_without_ext , ind_to_replace ):
    """Drop duplicate initializers from ``model_without_ext`` and repoint users.

    ``ind_to_replace`` holds ``(i, ref_i)`` pairs meaning initializer ``i`` is a
    duplicate of the earlier initializer ``ref_i``: entry ``i`` is removed and
    every node input that referenced it is rewritten to ``ref_i``'s name.
    """
    # Fixes the obfuscated original: duplicate ``UpperCamelCase__`` parameters
    # (a SyntaxError) and both saved names collapsed onto one variable, so the
    # graph rewrite received garbage arguments.
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        # Sanity checks: both models must agree on initializer order, and the
        # surviving (reference) entry must come before the duplicate.
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def __lowerCamelCase ( UpperCamelCase__ ):
    """Deduplicate identical initializers in the ONNX model at ``UpperCamelCase__``.

    Scans all graph initializers pairwise, marks byte-identical duplicates
    (compared via ``_is_equal_tensor_proto``, which ignores names), rewrites
    node inputs to the surviving copy, reports the estimated memory saved, and
    writes the optimized model next to the input file as ``optimized_<name>``.

    Returns the path of the optimized model file.
    """
    # Fixes the obfuscated original: every local collapsed onto ``snake_case_``
    # and the loop bounds read ``len(<path string>)`` instead of the number of
    # initializers.  The evident structure is restored below.
    model_file_folder = os.path.dirname(UpperCamelCase__ )
    model_file_name = os.path.basename(UpperCamelCase__ )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                # Estimate bytes saved by dropping this duplicate
                # (ONNX dtypes: 1=FLOAT, 6=INT32 -> 4 bytes; 7=INT64, 11=DOUBLE -> 8).
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1 or dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('unexpected data type: ' , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    optimized_model_file_name = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder , optimized_model_file_name )
    onnx.save(model , new_model )
    return new_model
| 285 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UpperCamelCase_ ( lowercase_ ):
    # Output container for the 1-D UNet defined below.
    # NOTE(review): automated renaming replaced the base class with the
    # undefined name ``lowercase_`` (presumably ``BaseOutput``) and collapsed
    # the annotated field (presumably ``sample: torch.FloatTensor``) into the
    # literal below — restore from the original source before use.
    lowercase = 42
class UpperCamelCase_ ( lowercase_ , lowercase_ ):
    # A 1-D UNet: time embedding, a stack of down blocks, an optional mid
    # block, a stack of up blocks, and an optional output head.
    # NOTE(review): this block was damaged by automated renaming — both base
    # classes are the undefined name ``lowercase_`` (presumably
    # ModelMixin/ConfigMixin), every ``__init__``/forward parameter is named
    # ``A`` (duplicate parameter names are a SyntaxError as written), and tuple
    # unpackings collapsed onto a single variable.  The comments below record
    # the evident intent; exact names must be restored from the original.

    @register_to_config
    def __init__( self , A = 65536 , A = None , A = 2 , A = 2 , A = 0 , A = "fourier" , A = True , A = False , A = 0.0 , A = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , A = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , A = "UNetMidBlock1D" , A = None , A = (32, 32, 64) , A = None , A = 8 , A = 1 , A = False , ) -> List[str]:
        super().__init__()
        UpperCAmelCase : List[str] = sample_size

        # time: either a Gaussian Fourier projection or a sinusoidal
        # positional embedding, optionally followed by a TimestepEmbedding MLP.
        if time_embedding_type == "fourier":
            UpperCAmelCase : Optional[int] = GaussianFourierProjection(
                embedding_size=8 , set_W_to_weight=A , log=A , flip_sin_to_cos=A )
            UpperCAmelCase : str = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            UpperCAmelCase : Tuple = Timesteps(
                block_out_channels[0] , flip_sin_to_cos=A , downscale_freq_shift=A )
            UpperCAmelCase : Union[str, Any] = block_out_channels[0]
        if use_timestep_embedding:
            UpperCAmelCase : Optional[int] = block_out_channels[0] * 4
            UpperCAmelCase : Any = TimestepEmbedding(
                in_channels=A , time_embed_dim=A , act_fn=A , out_dim=block_out_channels[0] , )
        UpperCAmelCase : Optional[Any] = nn.ModuleList([] )
        UpperCAmelCase : Optional[Any] = None
        UpperCAmelCase : Any = nn.ModuleList([] )
        UpperCAmelCase : str = None

        # down: one block per entry; the first block is widened by
        # ``extra_in_channels`` and every block but the last downsamples
        # (unless downsample_each_block forces it).
        UpperCAmelCase : List[Any] = in_channels
        for i, down_block_type in enumerate(A ):
            UpperCAmelCase : Tuple = output_channel
            UpperCAmelCase : str = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            UpperCAmelCase : Dict = i == len(A ) - 1
            UpperCAmelCase : Optional[Any] = get_down_block(
                A , num_layers=A , in_channels=A , out_channels=A , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
            self.down_blocks.append(A )

        # mid
        UpperCAmelCase : Optional[int] = get_mid_block(
            A , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=A , add_downsample=A , )

        # up: mirror of the down path; the final block maps back to
        # ``out_channels`` (or block_out_channels[0] when an out block exists).
        UpperCAmelCase : Tuple = list(reversed(A ) )
        UpperCAmelCase : Dict = reversed_block_out_channels[0]
        if out_block_type is None:
            UpperCAmelCase : Dict = out_channels
        else:
            UpperCAmelCase : int = block_out_channels[0]
        for i, up_block_type in enumerate(A ):
            UpperCAmelCase : Union[str, Any] = output_channel
            UpperCAmelCase : Optional[int] = (
                reversed_block_out_channels[i + 1] if i < len(A ) - 1 else final_upsample_channels
            )
            UpperCAmelCase : Union[str, Any] = i == len(A ) - 1
            UpperCAmelCase : Optional[int] = get_up_block(
                A , num_layers=A , in_channels=A , out_channels=A , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
            self.up_blocks.append(A )
            UpperCAmelCase : Tuple = output_channel

        # out: optional post-processing head.
        UpperCAmelCase : List[str] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
        UpperCAmelCase : str = get_out_block(
            out_block_type=A , num_groups_out=A , embed_dim=block_out_channels[0] , out_channels=A , act_fn=A , fc_dim=block_out_channels[-1] // 4 , )

    def _lowercase( self , A , A , A = True , ) -> List[str]:
        # Forward pass: embed timesteps, run down blocks collecting residuals,
        # mid block, up blocks consuming residuals, optional out block.
        # NOTE(review): duplicate ``A`` parameters (presumably sample,
        # timestep, return_dict) make this signature invalid as written.
        UpperCAmelCase : Tuple = timestep
        # 1. time: normalize ``timesteps`` to a 1-D tensor on the sample's device.
        if not torch.is_tensor(A ):
            UpperCAmelCase : Optional[int] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
        elif torch.is_tensor(A ) and len(timesteps.shape ) == 0:
            UpperCAmelCase : Union[str, Any] = timesteps[None].to(sample.device )
        UpperCAmelCase : Optional[Any] = self.time_proj(A )
        if self.config.use_timestep_embedding:
            UpperCAmelCase : Union[str, Any] = self.time_mlp(A )
        else:
            # Broadcast the raw projection across the sample's length dimension.
            UpperCAmelCase : List[str] = timestep_embed[..., None]
            UpperCAmelCase : Dict = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
            UpperCAmelCase : Any = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )

        # 2. down
        UpperCAmelCase : int = ()
        for downsample_block in self.down_blocks:
            # NOTE(review): the unpack collapsed onto one name; ``res_samples``
            # on the next line is undefined as written.
            UpperCAmelCase , UpperCAmelCase : Tuple = downsample_block(hidden_states=A , temb=A )
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            UpperCAmelCase : Any = self.mid_block(A , A )

        # 4. up: feed each up block the most recent residual(s).
        for i, upsample_block in enumerate(self.up_blocks ):
            UpperCAmelCase : List[str] = down_block_res_samples[-1:]
            UpperCAmelCase : Tuple = down_block_res_samples[:-1]
            UpperCAmelCase : Tuple = upsample_block(A , res_hidden_states_tuple=A , temb=A )

        # 5. post-process
        if self.out_block:
            UpperCAmelCase : Dict = self.out_block(A , A )
        if not return_dict:
            return (sample,)
        return UNetaDOutput(sample=A )
| 265 |
import numpy as np
def __lowerCamelCase ( UpperCamelCase__ ):
    """Return the logistic sigmoid 1 / (1 + e^-x), applied elementwise.

    Accepts a numpy array (or any numpy-broadcastable value) and returns
    values in the open interval (0, 1).
    """
    # Fixes the obfuscated original, whose body referenced the undefined name
    # ``vector`` instead of the parameter.
    return 1 / (1 + np.exp(-UpperCamelCase__ ))
def __lowerCamelCase ( UpperCamelCase__ ):
    """Return the SiLU (sigmoid-weighted linear unit): x * sigmoid(x).

    The original body referenced the undefined names ``vector`` and
    ``sigmoid`` (the helper above was renamed away), so the sigmoid is
    inlined here.
    """
    return UpperCamelCase__ * (1 / (1 + np.exp(-UpperCamelCase__ )))
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 285 | 0 |
import os
import sys
import unittest
# Repository root: three directory levels up from this test file.
A : Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
# NOTE(review): ``git_repo_path`` is undefined — the constant above was renamed
# to ``A``; restore the original name so the utils directory gets on sys.path.
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
# Paths of the two modeling test files exercised by the tests below.
# NOTE(review): both constants were renamed to ``A`` (the second shadows the
# first) — presumably ``bert_test`` and ``blip_test`` originally; confirm.
A : Any = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
A : str = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class _lowercase ( unittest.TestCase):
    """simple docstring"""

    # Checks the test-introspection helpers (get_test_info) against the BERT
    # and BLIP modeling test files.
    # NOTE(review): every helper call below receives ``__lowerCamelCase``,
    # which is name-mangled inside this class and defined nowhere — the
    # original passed the module-level test-file paths.  Restore before running.

    def lowerCAmelCase ( self : List[str] ):
        '''simple docstring'''
        # test class -> tester class mapping
        lowerCamelCase__ : Optional[Any] = get_test_to_tester_mapping(__lowerCamelCase )
        lowerCamelCase__ : Any = get_test_to_tester_mapping(__lowerCamelCase )
        lowerCamelCase__ : Optional[Any] = {"BertModelTest": "BertModelTester"}
        lowerCamelCase__ : Any = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }
        self.assertEqual(get_test_info.to_json(__lowerCamelCase ) , __lowerCamelCase )
        self.assertEqual(get_test_info.to_json(__lowerCamelCase ) , __lowerCamelCase )

    def lowerCAmelCase ( self : Optional[int] ):
        '''simple docstring'''
        # model class -> test classes mapping
        lowerCamelCase__ : List[str] = get_model_to_test_mapping(__lowerCamelCase )
        lowerCamelCase__ : Union[str, Any] = get_model_to_test_mapping(__lowerCamelCase )
        lowerCamelCase__ : List[Any] = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }
        lowerCamelCase__ : Optional[int] = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }
        self.assertEqual(get_test_info.to_json(__lowerCamelCase ) , __lowerCamelCase )
        self.assertEqual(get_test_info.to_json(__lowerCamelCase ) , __lowerCamelCase )

    def lowerCAmelCase ( self : List[str] ):
        '''simple docstring'''
        # model class -> tester classes mapping
        lowerCamelCase__ : List[str] = get_model_to_tester_mapping(__lowerCamelCase )
        lowerCamelCase__ : Any = get_model_to_tester_mapping(__lowerCamelCase )
        lowerCamelCase__ : Optional[Any] = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }
        lowerCamelCase__ : str = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }
        self.assertEqual(get_test_info.to_json(__lowerCamelCase ) , __lowerCamelCase )
        self.assertEqual(get_test_info.to_json(__lowerCamelCase ) , __lowerCamelCase )
| 184 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def __lowerCamelCase ( UpperCamelCase__ ):
    '''Return the word's anagram signature: its letters sorted into a string.'''
    ordered_letters = sorted(UpperCamelCase__ )
    return "".join(ordered_letters )
def __lowerCamelCase ( UpperCamelCase__ ):
    """Return all known words sharing the given word's anagram signature.

    The original body called the undefined name ``signature`` (its sibling was
    renamed away), so the signature computation — the word's letters sorted
    into a string — is inlined here.  Raises KeyError when no word in the
    module-level index matches the signature.
    """
    return word_by_signature["".join(sorted(UpperCamelCase__ ) )]
# Build the word list and the signature -> words index at import time.
# NOTE(review): renaming collapsed the module constants onto ``_UpperCAmelCase``
# while later lines still read ``data``, ``word_list``, ``word_by_signature``
# and ``signature`` — none of which exist under those names any more; restore
# the original identifiers before running.
_UpperCAmelCase : str = Path(__file__).parent.joinpath("""words.txt""").read_text(encoding="""utf-8""")
_UpperCAmelCase : Dict = sorted({word.strip().lower() for word in data.splitlines()})
_UpperCAmelCase : List[str] = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    # Write every multi-word anagram group to anagrams.txt as a Python literal.
    # NOTE(review): ``anagram``, ``word_list`` and ``all_anagrams`` are
    # undefined here after renaming — restore the original names before running.
    _UpperCAmelCase : Dict = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("""anagrams.txt""", """w""") as file:
        file.write("""all_anagrams = \n """)
        file.write(pprint.pformat(all_anagrams))
| 285 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# Placeholder classes that raise an informative error when ``sentencepiece`` is
# missing: each stands in for one sentencepiece-backed tokenizer and fails on
# instantiation via ``requires_backends``.
# NOTE(review): automated renaming collapsed every class onto the same name
# ``lowerCAmelCase__`` (later definitions shadow earlier ones) and replaced the
# metaclass (presumably ``DummyObject``) with the undefined name ``lowercase_``;
# the original per-tokenizer class names must be restored from the source tree.
class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : Optional[Any] = ['''sentencepiece''']

    def __init__( self : List[str] , *lowerCamelCase__ : str , **lowerCamelCase__ : List[str] ) ->Union[str, Any]:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : Tuple = ['''sentencepiece''']

    def __init__( self : List[Any] , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : List[Any] ) ->Dict:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : Tuple = ['''sentencepiece''']

    def __init__( self : str , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ) ->str:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : Optional[Any] = ['''sentencepiece''']

    def __init__( self : Any , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : Tuple = ['''sentencepiece''']

    def __init__( self : Dict , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ) ->Union[str, Any]:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : List[str] = ['''sentencepiece''']

    def __init__( self : Optional[Any] , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Dict ) ->List[Any]:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : str = ['''sentencepiece''']

    def __init__( self : str , *lowerCamelCase__ : int , **lowerCamelCase__ : Union[str, Any] ) ->List[Any]:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : List[str] = ['''sentencepiece''']

    def __init__( self : str , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Any ) ->Dict:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : Optional[Any] = ['''sentencepiece''']

    def __init__( self : str , *lowerCamelCase__ : Any , **lowerCamelCase__ : List[str] ) ->Union[str, Any]:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : Optional[Any] = ['''sentencepiece''']

    def __init__( self : str , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : str ) ->int:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : Dict = ['''sentencepiece''']

    def __init__( self : Any , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : str ) ->List[Any]:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : str = ['''sentencepiece''']

    def __init__( self : Any , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[str] ) ->Union[str, Any]:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : Union[str, Any] = ['''sentencepiece''']

    def __init__( self : Dict , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[int] ) ->Tuple:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : int = ['''sentencepiece''']

    def __init__( self : Optional[Any] , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Dict ) ->List[str]:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : Tuple = ['''sentencepiece''']

    def __init__( self : List[Any] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Any ) ->Union[str, Any]:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : Dict = ['''sentencepiece''']

    def __init__( self : Union[str, Any] , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Tuple ) ->Optional[int]:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : List[str] = ['''sentencepiece''']

    def __init__( self : Optional[Any] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Optional[int] ) ->str:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : Any = ['''sentencepiece''']

    def __init__( self : Union[str, Any] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : str ) ->Tuple:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : Dict = ['''sentencepiece''']

    def __init__( self : Any , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ) ->str:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : List[str] = ['''sentencepiece''']

    def __init__( self : Optional[int] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : str ) ->Tuple:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : Any = ['''sentencepiece''']

    def __init__( self : Optional[int] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : List[Any] ) ->Union[str, Any]:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : Union[str, Any] = ['''sentencepiece''']

    def __init__( self : Optional[int] , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : Any ) ->List[str]:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : int = ['''sentencepiece''']

    def __init__( self : Optional[Any] , *lowerCamelCase__ : Any , **lowerCamelCase__ : Optional[int] ) ->List[Any]:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : int = ['''sentencepiece''']

    def __init__( self : Tuple , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Optional[Any] ) ->Union[str, Any]:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : Any = ['''sentencepiece''']

    def __init__( self : Optional[Any] , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : Any ) ->Union[str, Any]:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : int = ['''sentencepiece''']

    def __init__( self : Union[str, Any] , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : str ) ->str:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : Union[str, Any] = ['''sentencepiece''']

    def __init__( self : Optional[int] , *lowerCamelCase__ : str , **lowerCamelCase__ : Tuple ) ->Any:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : Any = ['''sentencepiece''']

    def __init__( self : Optional[Any] , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Tuple ) ->int:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : int = ['''sentencepiece''']

    def __init__( self : Optional[Any] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : List[str] ) ->Optional[Any]:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : Optional[int] = ['''sentencepiece''']

    def __init__( self : Union[str, Any] , *lowerCamelCase__ : Any , **lowerCamelCase__ : Optional[int] ) ->Tuple:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )


class lowerCAmelCase__ ( metaclass=lowercase_ ):
    lowerCAmelCase : List[Any] = ['''sentencepiece''']

    def __init__( self : Dict , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : str ) ->List[str]:
        '''simple docstring'''
        requires_backends(self , ["sentencepiece"] )
| 234 |
from __future__ import annotations
import numpy as np
def __lowerCamelCase ( UpperCamelCase__ ):
    """Doolittle LU decomposition of a square matrix.

    Returns ``(lower, upper)`` with ``lower @ upper == table``, where ``lower``
    is unit lower-triangular and ``upper`` is upper-triangular.

    Raises:
        ValueError: if the input is not square.
        ArithmeticError: if a zero pivot is hit (no LU decomposition exists).
    """
    # Fixes the obfuscated original: rows and columns were unpacked onto a
    # single variable (so the square check could never fire), and the loop
    # bounds read ``range(<matrix>)`` — a TypeError.  The standard Doolittle
    # recurrences are restored below.
    rows, columns = np.shape(UpperCamelCase__ )
    if rows != columns:
        msg = (
            '\'table\' has to be of square shaped array but got a '
            f'{rows}x{columns} array:\n{UpperCamelCase__}'
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        # Entries of L strictly left of the diagonal.
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError('No LU decomposition exists' )
            lower[i][j] = (UpperCamelCase__[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        # Entries of U on and right of the diagonal.
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = UpperCamelCase__[i][j] - total
    return lower, upper
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 285 | 0 |
def a ( A__ : str ) -> bool:
    """Return True when the number starts with an accepted issuer prefix.

    Accepted prefixes: 34/35/37, 4, 5, 6 (the prefix set this module
    validates against).
    """
    # Fixes the obfuscated original, whose body read the undefined name
    # ``credit_card_number`` instead of the parameter; the annotations are
    # corrected to the actual str -> bool contract.
    return A__.startswith(('34', '35', '37', '4', '5', '6') )
def a ( A__ : str ) -> bool:
    """Luhn checksum test for a digit string.

    Doubles every second digit from the right; a two-digit product (10..18)
    contributes the sum of its digits, which equals ``product % 10 + 1``.
    Returns True when the grand total is divisible by 10.
    """
    # Fixes the obfuscated original, whose body read the undefined names
    # ``credit_card_number``, ``UpperCamelCase__`` and ``cc_number`` and whose
    # locals had all collapsed onto one variable.
    cc_number = A__
    total = 0
    double_digit_start = len(cc_number ) - 2
    for i in range(double_digit_start , -1 , -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] ) * 2
        if digit > 9:
            digit = digit % 10 + 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1 , -1 , -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0
def a ( A__ : str ) -> bool:
    """Validate a credit card number: characters, length, prefix, Luhn.

    Prints a diagnostic for the first failed check (or a success message) and
    returns True/False.  The original body called ``validate_initial_digits``
    and ``luhn_validation`` — names that no longer exist after renaming — so
    both checks are inlined as local helpers.
    """

    def _initial_digits_ok(number: str ) -> bool:
        # Same prefix set the module validates against elsewhere.
        return number.startswith(('34', '35', '37', '4', '5', '6') )

    def _luhn_ok(number: str ) -> bool:
        # Double every second digit from the right; 10..18 -> digit sum.
        total = 0
        for i in range(len(number ) - 2 , -1 , -2 ):
            digit = int(number[i] ) * 2
            if digit > 9:
                digit = digit % 10 + 1
            total += digit
        for i in range(len(number ) - 1 , -1 , -2 ):
            total += int(number[i] )
        return total % 10 == 0

    credit_card_number = A__
    error_message = f'{credit_card_number} is an invalid credit card number because'
    if not credit_card_number.isdigit():
        print(f'{error_message} it has nonnumerical characters.' )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(f'{error_message} of its length.' )
        return False
    if not _initial_digits_ok(credit_card_number ):
        print(f'{error_message} of its first two digits.' )
        return False
    if not _luhn_ok(credit_card_number ):
        print(f'{error_message} it fails the Luhn check.' )
        return False
    print(f'{credit_card_number} is a valid credit card number.' )
    return True
if __name__ == "__main__":
    # Run the module's doctests, then smoke-check one valid and one invalid
    # number.  The validator in this module is defined as ``a`` (its original
    # name was lost to renaming), so it is called under that name — the
    # original guard called the nonexistent ``validate_credit_card_number``.
    import doctest

    doctest.testmod()
    a('4111111111111111')
    a('32323')
| 205 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowercase ( unittest.TestCase ):

    # Slow Flax integration test for Stable Diffusion 2 inpainting.
    # NOTE(review): tuple unpackings collapsed onto the single name
    # ``snake_case_`` and later lines read the lost originals (``pipeline``,
    # ``num_samples``, ``output`` ...); the bare ``snake_case`` arguments are
    # likewise undefined — restore names from upstream before running.

    def a ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def a ( self ):
        # Inpaint a yellow cat onto the benchmark image and compare a pixel
        # slice against the recorded expected values.
        snake_case_ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        snake_case_ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        snake_case_ = 'xvjiarui/stable-diffusion-2-inpainting'
        snake_case_ , snake_case_ = FlaxStableDiffusionInpaintPipeline.from_pretrained(snake_case , safety_checker=snake_case )
        snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
        snake_case_ = jax.random.PRNGKey(0 )
        snake_case_ = 50
        snake_case_ = jax.device_count()
        # Replicate the prompt/images once per device.
        snake_case_ = num_samples * [prompt]
        snake_case_ = num_samples * [init_image]
        snake_case_ = num_samples * [mask_image]
        snake_case_ , snake_case_ , snake_case_ = pipeline.prepare_inputs(snake_case , snake_case , snake_case )
        # shard inputs and rng
        snake_case_ = replicate(snake_case )
        snake_case_ = jax.random.split(snake_case , jax.device_count() )
        snake_case_ = shard(snake_case )
        snake_case_ = shard(snake_case )
        snake_case_ = shard(snake_case )
        snake_case_ = pipeline(
            snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , jit=snake_case )
        snake_case_ = output.images.reshape(snake_case , 512 , 512 , 3 )
        snake_case_ = images[0, 253:256, 253:256, -1]
        snake_case_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        snake_case_ = jnp.array(
            [0.3_61_13_07, 0.37_64_97_36, 0.3_75_74_08, 0.38_21_39_53, 0.39_29_51_67, 0.3_84_16_31, 0.41_55_49_78, 0.4_13_74_75, 0.4_21_70_84] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 285 | 0 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class lowercase__ ( nn.Module ):
    # Tiny Linear(3,4) -> BatchNorm -> Linear(4,5) network for the hook tests.
    # NOTE(review): renaming damage — the layer assignments below bind plain
    # locals named ``lowerCAmelCase__`` instead of the ``self.lineara`` /
    # ``self.batchnorm`` attributes that the second method reads, both linear
    # layers collapsed onto one name, and ``nn.BatchNormad`` does not exist
    # (presumably nn.BatchNorm1d).  Restore from the original source.

    def __init__( self )-> str:
        '''simple docstring'''
        super().__init__()
        lowerCAmelCase__ = nn.Linear(3 , 4 )
        lowerCAmelCase__ = nn.BatchNormad(4 )
        lowerCAmelCase__ = nn.Linear(4 , 5 )

    def UpperCAmelCase ( self , __UpperCAmelCase )-> Any:
        '''simple docstring'''
        # NOTE(review): the hook tests below call ``test_model.forward`` — this
        # method was presumably named ``forward`` originally; confirm.
        return self.lineara(self.batchnorm(self.lineara(__UpperCAmelCase ) ) )
class lowercase__ ( lowercase_ ):
    # Hook that increments the first positional argument before forward runs.
    # NOTE(review): the base class ``lowercase_`` is undefined (presumably
    # ModelHook) and the body reads ``args``/``kwargs`` while the renamed
    # parameters are ``__UpperCAmelCase`` (also a duplicate-name SyntaxError)
    # — restore the original names.

    def UpperCAmelCase ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase )-> int:
        '''simple docstring'''
        return (args[0] + 1,) + args[1:], kwargs
class lowercase__ ( lowercase_ ):
    # Hook that increments the module's output after forward runs.
    # NOTE(review): the base class ``lowercase_`` is undefined (presumably
    # ModelHook) and the body reads ``output`` while the renamed parameters
    # are ``__UpperCAmelCase`` — restore the original names.

    def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> Dict:
        '''simple docstring'''
        return output + 1
class lowercase__ ( unittest.TestCase ):
    def UpperCAmelCase ( self )-> Optional[int]:
        '''simple docstring'''
        # add_hook_to_module must attach the hook while preserving the forward
        # name/signature; remove_hook_from_module must restore the original state.
        # NOTE(review): locals collapsed onto ``lowerCAmelCase__`` and the
        # class-private ``__UpperCAmelCase`` arguments are mangled and defined
        # nowhere — the original threaded ``test_model``/``test_hook`` here.
        lowerCAmelCase__ = ModelForTest()
        lowerCAmelCase__ = ModelHook()
        add_hook_to_module(__UpperCAmelCase , __UpperCAmelCase )
        self.assertEqual(test_model._hf_hook , __UpperCAmelCase )
        self.assertTrue(hasattr(__UpperCAmelCase , "_old_forward" ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ , "forward" )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
        remove_hook_from_module(__UpperCAmelCase )
        self.assertFalse(hasattr(__UpperCAmelCase , "_hf_hook" ) )
        self.assertFalse(hasattr(__UpperCAmelCase , "_old_forward" ) )
    def UpperCAmelCase ( self )-> Any:
        '''simple docstring'''
        # append=True must stack a second hook into a SequentialHook (two
        # entries) rather than replacing the first.
        # NOTE(review): locals collapsed onto ``lowerCAmelCase__`` and the
        # mangled ``__UpperCAmelCase`` arguments are undefined as written.
        lowerCAmelCase__ = ModelForTest()
        lowerCAmelCase__ = ModelHook()
        add_hook_to_module(__UpperCAmelCase , __UpperCAmelCase )
        add_hook_to_module(__UpperCAmelCase , __UpperCAmelCase , append=__UpperCAmelCase )
        self.assertEqual(isinstance(test_model._hf_hook , __UpperCAmelCase ) , __UpperCAmelCase )
        self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
        self.assertTrue(hasattr(__UpperCAmelCase , "_old_forward" ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ , "forward" )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] )
        remove_hook_from_module(__UpperCAmelCase )
        self.assertFalse(hasattr(__UpperCAmelCase , "_hf_hook" ) )
        self.assertFalse(hasattr(__UpperCAmelCase , "_old_forward" ) )
    def UpperCAmelCase ( self )-> str:
        '''simple docstring'''
        # A PreForwardHook adds 1 to the input before forward; re-attaching
        # replaces the previous hook, and SequentialHook chains two of them.
        # NOTE(review): locals collapsed onto ``lowerCAmelCase__`` and the
        # mangled ``__UpperCAmelCase`` arguments are undefined as written.
        lowerCAmelCase__ = ModelForTest()
        lowerCAmelCase__ = torch.randn(2 , 3 )
        lowerCAmelCase__ = test_model(x + 1 )
        lowerCAmelCase__ = test_model(x + 2 )
        lowerCAmelCase__ = PreForwardHook()
        add_hook_to_module(__UpperCAmelCase , __UpperCAmelCase )
        lowerCAmelCase__ = test_model(__UpperCAmelCase )
        self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) )
        # Attaching a hook to a model when it already has one replaces, does not chain
        lowerCAmelCase__ = PreForwardHook()
        add_hook_to_module(__UpperCAmelCase , __UpperCAmelCase )
        lowerCAmelCase__ = test_model(__UpperCAmelCase )
        self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) )
        # You need to use the sequential hook to chain two or more hooks
        lowerCAmelCase__ = SequentialHook(PreForwardHook() , PreForwardHook() )
        add_hook_to_module(__UpperCAmelCase , __UpperCAmelCase )
        lowerCAmelCase__ = test_model(__UpperCAmelCase )
        assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 )
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = ModelForTest()
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = test_model(__UpperCAmelCase )
lowerCAmelCase__ = PostForwardHook()
add_hook_to_module(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = test_model(__UpperCAmelCase )
self.assertTrue(torch.allclose(__UpperCAmelCase , output + 1 , atol=1E-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
lowerCAmelCase__ = PostForwardHook()
add_hook_to_module(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = test_model(__UpperCAmelCase )
self.assertTrue(torch.allclose(__UpperCAmelCase , output + 1 , atol=1E-5 ) )
# You need to use the sequential hook to chain two or more hooks
lowerCAmelCase__ = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = test_model(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , output + 2 , atol=1E-5 )
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = ModelForTest()
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = test_model(__UpperCAmelCase )
lowerCAmelCase__ = PostForwardHook()
add_hook_to_module(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = test_model(__UpperCAmelCase )
self.assertTrue(torch.allclose(__UpperCAmelCase , output + 1 ) )
self.assertTrue(outputa.requires_grad )
lowerCAmelCase__ = True
lowerCAmelCase__ = test_model(__UpperCAmelCase )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(__UpperCAmelCase )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(__UpperCAmelCase , AlignDevicesHook(io_same_device=__UpperCAmelCase ) )
lowerCAmelCase__ = torch.randn(2 , 3 ).to(0 )
lowerCAmelCase__ = model(__UpperCAmelCase )
self.assertEqual(output.device , torch.device(0 ) )
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
lowerCAmelCase__ = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCAmelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__UpperCAmelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCAmelCase ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCAmelCase__ = torch.device(hook_kwargs["execution_device"] )
self.assertEqual(model.batchnorm.running_mean.device , __UpperCAmelCase )
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(__UpperCAmelCase )
self.assertEqual(output.device , __UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
lowerCAmelCase__ = {
"execution_device": 0 if torch.cuda.is_available() else "cpu",
"offload": True,
"offload_buffers": True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCAmelCase ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**__UpperCAmelCase ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**__UpperCAmelCase ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(__UpperCAmelCase )
self.assertEqual(output.device , __UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
lowerCAmelCase__ = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(__UpperCAmelCase , execution_device=__UpperCAmelCase , offload=__UpperCAmelCase )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCAmelCase__ = torch.device(__UpperCAmelCase )
self.assertEqual(model.batchnorm.running_mean.device , __UpperCAmelCase )
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(__UpperCAmelCase )
self.assertEqual(output.device , __UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__UpperCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(__UpperCAmelCase , execution_device=__UpperCAmelCase , offload=__UpperCAmelCase , offload_buffers=__UpperCAmelCase )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(__UpperCAmelCase )
self.assertEqual(output.device , __UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__UpperCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# This will move each submodule on different devices
lowerCAmelCase__ = 0 if torch.cuda.is_available() else "cpu"
attach_align_device_hook(
__UpperCAmelCase , execution_device=__UpperCAmelCase , offload=__UpperCAmelCase , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
# Buffers are not included in the offload by default, so are on the execution device
lowerCAmelCase__ = torch.device(__UpperCAmelCase )
self.assertEqual(model.batchnorm.running_mean.device , __UpperCAmelCase )
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(__UpperCAmelCase )
self.assertEqual(output.device , __UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__UpperCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
__UpperCAmelCase , execution_device=__UpperCAmelCase , offload=__UpperCAmelCase , weights_map=model.state_dict() , offload_buffers=__UpperCAmelCase , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) )
self.assertEqual(model.lineara.weight.device , torch.device("meta" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) )
lowerCAmelCase__ = torch.randn(2 , 3 )
lowerCAmelCase__ = model(__UpperCAmelCase )
self.assertEqual(output.device , __UpperCAmelCase )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(__UpperCAmelCase )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) )
self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
| 340 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    'files' , [
        ['full:README.md', 'dataset_infos.json'],
        ['empty:README.md', 'dataset_infos.json'],
        ['dataset_infos.json'],
        ['full:README.md'],
    ] , )
def __lowerCamelCase ( files , tmp_path_factory ):
    """DatasetInfosDict.from_directory reads dataset_size from the README YAML header
    or from the legacy dataset_infos.json, whichever is available."""
    # pytest injects `files` from the parametrize above and `tmp_path_factory` as a fixture;
    # the mangled signature had both parameters named identically, breaking injection.
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir' )
    if "full:README.md" in files:
        with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
            f.write('---\ndataset_info:\n  dataset_size: 42\n---' )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
            f.write('' )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
            f.write('{"default": {"dataset_size": 42}}' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    'dataset_info' , [
        DatasetInfo(),
        DatasetInfo(
            description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
    ] , )
def __lowerCamelCase ( dataset_info , tmp_path ):
    """A DatasetInfo round-trips through write_to_directory / from_directory."""
    # `tmp_path` is a pytest fixture; the mangled signature had shadowed parameters.
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , 'dataset_info.json' ) )
def __lowerCamelCase ( ):
    """A fully-populated DatasetInfo survives a round trip through its YAML representation."""
    dataset_info = DatasetInfo(
        description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    # Only (and all of) the whitelisted fields end up in the YAML dict.
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def __lowerCamelCase ( ):
    """An empty DatasetInfo serializes to an empty YAML dict."""
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    'dataset_infos_dict' , [
        DatasetInfosDict(),
        DatasetInfosDict({'default': DatasetInfo()} ),
        DatasetInfosDict({'my_config_name': DatasetInfo()} ),
        DatasetInfosDict(
            {
                'default': DatasetInfo(
                    description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
            } ),
        DatasetInfosDict(
            {
                'v1': DatasetInfo(dataset_size=42 ),
                'v2': DatasetInfo(dataset_size=1337 ),
            } ),
    ] , )
def __lowerCamelCase ( dataset_infos_dict , tmp_path ):
    """A DatasetInfosDict round-trips through write_to_directory / from_directory."""
    dataset_infos_dir = str(tmp_path )
    dataset_infos_dict.write_to_directory(dataset_infos_dir )
    reloaded = DatasetInfosDict.from_directory(dataset_infos_dir )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(dataset_infos_dir , 'README.md' ) )
| 285 | 0 |
from PIL import Image
def lowerCAmelCase_ ( img , level ):
    """Return a copy of ``img`` with its contrast adjusted by ``level``.

    img: a PIL image (anything exposing ``.point``).
    level: contrast level in [-255, 255]; 0 leaves the image unchanged.

    The original signature declared the same parameter name twice (a SyntaxError)
    and the body referenced undefined names; restored here.
    """
    # Standard contrast-correction factor.
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c) -> int:
        # Scale each channel value away from the midpoint 128.
        return int(128 + factor * (c - 128))

    # Apply per channel value via PIL's point().
    return img.point(contrast)


# Public name used by the __main__ section below.
change_contrast = lowerCAmelCase_
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
    cont_img.save("image_data/lena_high_contrast.png", format="png")
| 123 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( lowercase_ , unittest.TestCase ):
    """Tokenization tests for the BLOOM fast tokenizer (BLOOM has no slow tokenizer)."""

    # Configuration attributes read by the TokenizerTesterMixin base class.  The
    # mangled original bound every one of them to the same name, so only the last
    # value survived; restored to the canonical attribute names.
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = 'tokenizer_file'
    special_tokens_map = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'}

    def setUp(self):
        """Download the reference tokenizer and save it into the test tmp dir."""
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained('bigscience/tokenizer')
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **snake_case):
        """Reload the saved tokenizer, forcing the special-tokens map."""
        snake_case.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **snake_case)

    def test_encode_decode(self):
        """Encoding then decoding a batch reproduces the inputs and the expected ids."""
        tokenizer = self.get_rust_tokenizer()
        input_texts = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
        expected_ids = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]]
        computed_ids = tokenizer.batch_encode_plus(input_texts)['input_ids']
        self.assertListEqual(computed_ids, expected_ids)
        decoded = tokenizer.batch_decode(computed_ids)
        self.assertListEqual(decoded, input_texts)

    def test_padding(self, max_length=6):
        """Padding works with a pad token and raises ValueError once pad_token is removed."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail('Bloom Tokenizer should be able to deal with padding')
                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length', )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length', )

    def test_encodings_from_xnli_dataset(self):
        """Encoding then decoding real multilingual text is lossless."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset('xnli', 'all_languages', split='test', streaming=True)
        sample_data = next(iter(ds))['premise']  # pick up one data
        input_text = list(sample_data.values())
        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positoonal embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 285 | 0 |
"""simple docstring"""
def _snake_case ( lowerCamelCase__ ):
    """Sort a list in place with cocktail-shaker sort and return it.

    Alternates a backward pass (sinks the minimum toward the front) with a
    forward pass (floats the maximum toward the back), shrinking the unsorted
    window each round and stopping early once a full round makes no swap.
    """
    unsorted = lowerCamelCase__
    for upper in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # Backward pass over the still-unsorted window.
        for j in range(upper, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True
        # Forward pass over the same window.
        for j in range(upper):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j + 1], unsorted[j] = unsorted[j], unsorted[j + 1]
                swapped = True
        if not swapped:
            # Early exit: a full round without swaps means the list is sorted.
            break
    return unsorted


# Public name used by the __main__ section below.
cocktail_shaker_sort = _snake_case
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(f'{cocktail_shaker_sort(unsorted) = }')
| 144 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def __lowerCamelCase ( class_prompt , class_data_dir , num_class_images ):
    """Download up to ``num_class_images`` images matching ``class_prompt`` from the LAION index.

    Captions, URLs and local image paths are written alongside the images under
    ``class_data_dir``.  The mangled signature declared the same parameter name
    three times; restored to the three distinct arguments the body uses.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=num_images, aesthetic_weight=0.1)
    os.makedirs(F'''{class_data_dir}/images''', exist_ok=True)
    # Nothing to do if enough images are already on disk.
    if len(list(Path(F'''{class_data_dir}/images''').iterdir())) >= num_class_images:
        return
    # Grow the query size until the index returns enough candidates (capped at 10k).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1E4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url='https://knn.laion.ai/knn-service', indice_name='laion_400m', num_images=num_images, aesthetic_weight=0.1, )
    count = 0
    total = 0
    pbar = tqdm(desc='downloading real regularization images', total=num_class_images)
    # Three distinct sidecar files (the original bound all three handles to one name).
    with open(F'''{class_data_dir}/caption.txt''', 'w') as f_caption, open(F'''{class_data_dir}/urls.txt''', 'w') as f_urls, open(
        F'''{class_data_dir}/images.txt''', 'w') as f_paths:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images['url'])
                if img.status_code == 200:
                    # Validate that the payload decodes as an image before saving.
                    Image.open(BytesIO(img.content))
                    with open(F'''{class_data_dir}/images/{total}.jpg''', 'wb') as f:
                        f.write(img.content)
                    f_caption.write(images['caption'] + '\n')
                    f_urls.write(images['url'] + '\n')
                    f_paths.write(F'''{class_data_dir}/images/{total}.jpg''' + '\n')
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download loop: skip unreachable URLs / undecodable payloads.
                continue
    return


# Public name used by the __main__ section below.
retrieve = __lowerCamelCase
def __lowerCamelCase ( ):
    """Parse the command-line arguments for the retrieval script."""
    parser = argparse.ArgumentParser('', add_help=False)
    parser.add_argument('--class_prompt', help='text prompt to retrieve images', required=True, type=str)
    parser.add_argument('--class_data_dir', help='path to save images', required=True, type=str)
    parser.add_argument('--num_class_images', help='number of images to download', default=200, type=int)
    return parser.parse_args()


# Public name used by the __main__ section below.
parse_args = __lowerCamelCase
if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 285 | 0 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
# Scripts excluded from the diff-against-complete-example comparison below.
lowerCAmelCase__ = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]
# The test class below looks the list up as EXCLUDE_EXAMPLES; the mangled
# rename above broke that reference, so expose it under both names.
EXCLUDE_EXAMPLES = lowerCAmelCase__
class a__ ( unittest.TestCase ):
    """Verify each `examples/by_feature` script stays in sync with the complete examples."""

    def one_complete_example(self, complete_file_name, parser_only, secondary_filename=None, special_strings=None):
        """Diff every by_feature script against the given complete example and assert no difference.

        The mangled signature declared four parameters with the same name (a
        SyntaxError) and the test methods below call ``one_complete_example``;
        both restored here.
        """
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name, feature_script=item, tested_section="main()" if parser_only else "training_function()", ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, special_strings)
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            # Strings that legitimately differ are stripped before comparing.
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        # Tracking-related lines that only exist in the complete example.
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
            " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
            " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
            " " * 20 + "\"epoch\": epoch,\n\n",
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '1'} )
class a__ ( lowercase_ ):
    """End-to-end `accelerate launch` runs of the feature example scripts (mocked dataloaders)."""

    # Keep checkpoints between tests so the load_states tests can resume from them.
    # NOTE(review): upstream this attribute was `clear_on_setup`, read by TempDirTestCase.
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        """Write a default accelerate config once and build the shared launch command."""
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = F'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = F'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = F'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        # Resuming from epoch_0 must skip epoch 0 and run epoch 1.
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = F'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        # With several processes step_2 already finishes epoch 0; with one it does not.
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            # The script prints metric dicts; take the last one containing accuracy.
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = F'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 68 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Any = logging.get_logger(__name__)
# Maximum model input sizes, keyed by checkpoint name.  The class below reads
# this under the name PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES; the mangled
# original bound every constant in this section to a single reused name.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES: Dict[str, int] = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    r"""
    Construct a CANINE tokenizer (i.e. a character splitter). CANINE operates
    directly on Unicode codepoints, so token ids are `ord()` values and no
    vocabulary file is needed.

    Fixes vs. the previous revision: the `__init__` parameters all shared one
    name (a SyntaxError) and their defaults called `chr()` on an undefined
    name; parameter/method names are restored to the tokenizer contract.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        """The full Unicode codepoint space."""
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Characters are the tokens."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            # Special codepoints map back to their human-readable names.
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """[CLS] A [SEP] (pair: [CLS] A [SEP] B [SEP])."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """1 marks a special token, 0 a sequence token."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to save.
        return ()
| 285 | 0 |
class Node:
    """A node of a doubly linked list: a payload plus previous/next links.

    Fix: attribute assignments were lost (written to a dead local); the
    accessors below ground the attribute names `data`, `previous`, `next`.
    """

    def __init__(self, data, previous=None, next_node=None):
        self.data = data          # payload
        self.previous = previous  # link to the previous node (None at the head)
        self.next = next_node     # link to the next node (None at the tail)

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous
class LinkedListIterator:
    """Forward iterator over a chain of nodes, starting at *head*.

    Fix: `__init__` never stored the head (dead local) and the advance method
    was not named `__next__`, so iteration could not work.
    """

    def __init__(self, head):
        self.current = head  # cursor into the chain; None means exhausted

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        # Yield the current payload, then advance the cursor.
        value = self.current.get_data()
        self.current = self.current.get_next()
        return value
class LinkedList:
    """A doubly linked list with head/tail pointers.

    Fix: every `self.…` / `node.…` assignment had been replaced with a dead
    local; the pointer updates are restored from the surviving reads
    (`node.previous`, `node.next`, `self.head`, `self.tail`) and the internal
    call sites (`insert_before_node`, `get_node`, …) ground the method names.
    """

    def __init__(self) -> None:
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self) -> str:
        """Space-separated payloads from head to tail."""
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value) -> bool:
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node) -> None:
        """Make *node* the new head (also the tail if the list was empty)."""
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node) -> None:
        """Make *node* the new tail."""
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        """Append *value* at the tail (or start the list)."""
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert) -> None:
        """Splice *node_to_insert* immediately before *node*."""
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert) -> None:
        """Splice *node_to_insert* immediately after *node*."""
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position, value) -> None:
        """Insert *value* at 1-based *position*; appends when past the end."""
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item):
        """Return the first node holding *item*; raise if absent."""
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        """Unlink the first node holding *value* (head/tail aware)."""
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node) -> None:
        """Detach *node* from its neighbours and clear its own links."""
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self) -> bool:
        return self.head is None
def lowerCAmelCase_ ( ) -> None:
    # NOTE(review): this function body appears truncated — it contains only a
    # docstring and implicitly returns None (the annotation is corrected to
    # match). TODO: restore the original body or remove the stub.
    '''simple docstring'''


if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 281 |
def generate_large_matrix() -> list:
    """Build a 1000x2000 grid whose rows and columns are sorted in decreasing order.

    Row i is the descending run 1000-i .. -999-i, so each column also decreases.
    (Name restored: the module-level `generate_large_matrix()` call grounds it.)
    """
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
# Large benchmark fixture (name restored: referenced as `grid` inside the tuple
# below and in the benchmark's timeit setup string).
grid = generate_large_matrix()

# Test fixtures: every grid is sorted in decreasing order along rows and columns.
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list) -> None:
    """Assert that *grid* is sorted in decreasing order both row-wise and column-wise.

    Fix: the previous revision sorted the whole grid with `reverse=grid`
    instead of sorting each row/column with `reverse=True`.
    """
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list) -> int:
    """Return the index of the first negative entry of a decreasingly sorted array
    (equivalently, the count of its non-negative entries), via binary search.

    Fix: the previous revision collapsed `left`, `right`, `mid` and `num`
    into one variable, destroying the search.
    """
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1

    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list) -> int:
    """Count negative numbers in a grid sorted decreasingly along rows and columns.

    Walks the rows while shrinking the right `bound`: once a column index goes
    negative in one row it stays negative in all later rows, so each row only
    searches the prefix `[:bound]`.
    """
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound

    # `total` counted the non-negative entries; subtract from the cell count.
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list) -> int:
    """Count negative numbers by inspecting every cell (O(rows*cols) baseline)."""
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list) -> int:
    """Count negatives row by row, stopping at the first negative in each row.

    Rows are sorted decreasingly, so every entry after the first negative one is
    negative too. Fix: the previous revision enumerated/measured the whole grid
    instead of the current row.
    """
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    """Time the three counting implementations on the module-level `grid`."""
    from timeit import timeit

    print('Running benchmarks')
    setup = (
        'from __main__ import count_negatives_binary_search, '
        'count_negatives_brute_force, count_negatives_brute_force_with_break, grid'
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 285 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Output of the projected text encoder.

    Fields (names restored from the keyword arguments used to build this
    output in `forward`): the projected state, the encoder's last hidden
    state, and optional per-layer hidden states / attentions.
    """

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    """Configuration for `RobertaSeriesModelWithTransformation`.

    Extends `XLMRobertaConfig` with the projection/pooling options the model
    reads (`config.project_dim` etc.). Fix: the attribute assignments were
    written to a dead local instead of `self`.
    """

    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim            # width of the projection head
        self.pooler_fn = pooler_fn                # pooling strategy name
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    """XLM-RoBERTa encoder with a linear projection head on top.

    Fix: submodule assignments were written to a dead local; they are restored
    from the attribute reads in `forward` (`self.pre_LN`,
    `self.transformation_pre`, `self.transformation`, `self.base_model`).
    """

    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        # Projects the encoder output to `project_dim`.
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            # The pre-transformation branch reads the second-to-last hidden
            # state, so hidden states must be returned in that case.
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output2 = outputs["hidden_states"][-2]
            sequence_output2 = self.pre_LN(sequence_output2)
            projection_state2 = self.transformation_pre(sequence_output2)
            return TransformationModelOutput(
                projection_state=projection_state2,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
| 256 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
_UpperCAmelCase : Dict = logging.get_logger(__name__)
class Conversation:
    """
    Holds one conversation: the pending user input (`new_user_input`), the
    processed inputs (`past_user_inputs`) and the model replies
    (`generated_responses`).

    Fixes: all four `__init__` parameters shared one name (a SyntaxError),
    attribute assignments went to a dead local, and `uuid.uuida()` is not a
    real function (`uuid.uuid4()`).
    """

    def __init__(self, text=None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text, overwrite=False):
        """Queue *text* as the next user input; warn (and optionally overwrite)
        if an unprocessed input is already pending."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move the pending user input into the processed history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response):
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield (is_user, text) pairs over the whole history, oldest first."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    """
    Multi-turn conversational pipeline: encodes a `Conversation`'s history,
    generates a reply and appends it to the conversation.

    Fixes: parameter-dict writes went to a dead local (so no pipeline
    parameters were ever forwarded) and all methods shared one name.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            # Models without a pad token pad with EOS instead.
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        """Run the pipeline; a single input returns a single `Conversation`."""
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError('ConversationalPipeline, expects Conversation as inputs')
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, '_build_conversation_input_ids'):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get('max_length', self.model.config.max_length)

        n = model_inputs['input_ids'].shape[1]
        if max_length - minimum_tokens < n:
            # Keep at least `minimum_tokens` of headroom for the reply.
            logger.warning(f"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs['input_ids'] = model_inputs['input_ids'][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs['attention_mask'] = model_inputs['attention_mask'][:, -trim:]
        conversation = model_inputs.pop('conversation')
        generate_kwargs['max_length'] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            # Encoder-decoder outputs start with the decoder start token.
            start_position = 1
        else:
            # Decoder-only models echo the prompt; skip it.
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs['output_ids']
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs['conversation']
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        """Flatten the conversation into ids, separating turns with EOS."""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 285 | 0 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download `num_class_images` regularization images matching *class_prompt*
    into `class_data_dir` via the LAION knn service.

    Fixes: locals were collapsed into one name, and the three caption/url/index
    files were all opened under the same handle name.
    """
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the query size until enough candidates are returned (capped at 1e4).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fa_urls, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fa_paths:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    # Validate that the payload decodes as an image before keeping it.
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fa_urls.write(images["url"] + "\n")
                    fa_paths.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                # Best-effort download loop: skip unreachable/broken candidates.
                continue
    return
def parse_args():
    """Parse the CLI arguments for the class-image retrieval script."""
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    # CLI entry point. Fix: the parsed namespace was bound to a throwaway name
    # while the call below read an undefined `args`.
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 200 |
from PIL import Image
def change_contrast(img, level: int):
    """Return a copy of *img* with its contrast adjusted by *level*.

    Uses the standard contrast-correction-factor formula; level 0 leaves the
    image unchanged, positive levels increase contrast.
    (Name restored: the `change_contrast(img, 170)` call below grounds it.)
    """
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Map one channel value, pivoting around the midpoint 128."""
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170 and save the result next to the input.
        # Fix: the result was bound to a throwaway name while the save call
        # read an undefined `cont_img`.
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
| 285 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix="") -> str:
    """Return a unique path (fresh temp dir + random file name) ending in *suffix*.

    Fix: `uuid.uuida` is not a real function (`uuid.uuid4`), and the temp dir
    was bound to a name the join never read.
    """
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    """Round-trip tests for `AgentAudio` (tensor <-> wav file on disk).

    Fixes: `torch.floataa` is not a dtype (`torch.float32`), and the collapsed
    locals made the assertions compare a tensor against a file path.
    """

    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    """Round-trip tests for `AgentImage` built from a tensor, a path and a PIL image."""

    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_path(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        # Built from a path: to_string should point at the very same file.
        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        # Built from an in-memory image: serialized to a *new* file.
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    """`AgentText` behaves like the plain string it wraps."""

    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        # AgentText compares equal to the underlying string itself.
        self.assertEqual(string, agent_type)
| 265 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
# General docstring
_UpperCAmelCase : Dict = """ResNetConfig"""
# Base docstring
_UpperCAmelCase : Optional[int] = """microsoft/resnet-50"""
_UpperCAmelCase : Optional[Any] = [1, 2048, 7, 7]
# Image classification docstring
_UpperCAmelCase : Tuple = """microsoft/resnet-50"""
_UpperCAmelCase : int = """tiger cat"""
_UpperCAmelCase : Optional[Any] = [
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> activation, the basic ResNet building block.

    Fixes: `nn.Convad`/`nn.BatchNormad` are not torch modules
    (`nn.Conv2d`/`nn.BatchNorm2d`) and the `__init__` parameters all shared
    one name (a SyntaxError).
    """

    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        # `padding=kernel_size // 2` keeps the spatial size for stride 1.
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """ResNet stem: 7x7 stride-2 convolution followed by 3x3 stride-2 max pooling.

    Fix: submodule assignments were written to a dead local, and
    `nn.MaxPoolad` is not a torch module (`nn.MaxPool2d`).
    """

    def __init__(self, config):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.'
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """1x1 strided convolution + batch norm used to project the residual branch
    when the spatial size or channel count changes."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """Classic ResNet residual layer: two 3x3 convolutions plus an (optional)
    shortcut projection when shape or stride changes."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            # Last conv has no activation: it is applied after the residual add.
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """Bottleneck residual layer: 1x1 reduce -> 3x3 -> 1x1 expand, plus shortcut.

    The first 1x1 convolution shrinks the width by `reduction` (default 4) so
    the 3x3 convolution runs on fewer channels.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            # Last conv has no activation: it is applied after the residual add.
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """A ResNet stage: `depth` stacked residual layers; the first one changes
    the channel count and downsamples by `stride`."""

    def __init__(self, config, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer

        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    """All ResNet stages; optionally collects the hidden state after each stage."""

    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(lowercase_):
    """Weight initialization and gradient-checkpointing plumbing shared by ResNet models.

    NOTE(review): the scrambling collapsed four distinct class attributes into one
    name (only the last assignment survived) and gave both methods the same name
    `a` (the second shadowed the first). Names below reconstructed from the
    assigned values and HF conventions — confirm against the base class.
    """

    config_class = ResNetConfig
    base_model_prefix = 'resnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # Kaiming-normal for convolutions, constant (1, 0) for norm layers.
        if isinstance(module, nn.Conv2d):  # was the mangled `nn.Convad`
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):  # was `nn.BatchNormad`
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # was `snake_case_ = value`: bound a throwaway local and had no effect
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
_UpperCAmelCase : Tuple = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase : Optional[int] = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    '''The bare ResNet model outputting raw features without any specific head on top.''' , lowercase_ , )
class ResNetModel(lowercase_):
    """Bare ResNet: embeddings -> encoder -> adaptive average pooling, no task head.

    NOTE(review): the original bound config/embedder/encoder/pooler to locals
    (forward reads them from ``self``), named forward `a` even though the class
    is called as a callable, and passed undefined names to the doc decorators.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))  # was the mangled nn.AdaptiveAvgPoolad
        # Initialize weights and apply final processing
        self.post_init()

    # NOTE(review): the inputs-docstring constant was scrambled to `_UpperCAmelCase`
    # (two constants share that name; the later assignment wins).
    @add_start_docstrings_to_model_forward(_UpperCAmelCase)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    '''
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , lowercase_ , )
class ResNetForImageClassification(lowercase_):
    """ResNet with a linear classification head and the standard HF loss selection.

    NOTE(review): restored the ``self.num_labels``/``self.resnet``/``self.classifier``
    attribute bindings (the original used locals) and the writes to
    ``self.config.problem_type`` that the branch tests below depend on.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    # NOTE(review): the inputs-docstring constant was scrambled to `_UpperCAmelCase`.
    @add_start_docstrings_to_model_forward(_UpperCAmelCase)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer the problem type once from the label count/dtype and cache it.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    '''
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    ''' , lowercase_ , )
class ResNetBackbone(lowercase_):
    """Backbone wrapper exposing selected stage outputs as feature maps.

    NOTE(review): the original listed the same base class twice (a TypeError at
    class creation); the lost second base was presumably a backbone mixin that
    provides `_init_backbone` / `stage_names` / `out_features` — restore it if
    known. Attribute bindings and forward wiring reconstructed from usage.
    """

    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    # NOTE(review): the inputs-docstring constant was scrambled to `_UpperCAmelCase`.
    @add_start_docstrings_to_model_forward(_UpperCAmelCase)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        """Return the feature maps of the stages listed in `self.out_features`."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        # Hidden states are always needed internally to pick out feature maps.
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
| 285 | 0 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def lowercase_():
    """Build a random 3-sum test case: ten ints in [-1000, 1000] and a target in [-5000, 5000].

    The original bound both values to one scrambled local and then returned the
    undefined names ``arr`` and ``r`` (a NameError); restored.
    """
    arr = [randint(-1000, 1000) for _ in range(10)]
    target = randint(-5000, 5000)
    return (arr, target)
A : List[str] = make_dataset()
def lowercase_(arr, target):
    """Naive O(n^3) 3-sum: try every ordered triple of distinct elements.

    Returns the matching triple sorted ascending, or (0, 0, 0) when no triple
    sums to `target`. (The original declared both parameters with the same
    name — a SyntaxError.)
    """
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def lowercase_(arr, target):
    """Two-pointer O(n^2) 3-sum over a sorted copy of `arr`.

    Returns the matching triple in ascending order, or (0, 0, 0) if none exists.
    Fixes: duplicate parameter names (SyntaxError) and the in-place
    ``arr.sort()`` that mutated the caller's list.
    """
    items = sorted(arr)
    n = len(items)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            current = items[i] + items[left] + items[right]
            if current == target:
                return (items[i], items[left], items[right])
            if current < target:
                left += 1
            else:
                right -= 1
    return (0, 0, 0)
def lowercase_():
    """Time the naive and two-pointer 3-sum solvers; return their best-of-5 times.

    Fixes: the original passed undefined scrambled names to `repeat` (NameError).
    NOTE(review): the setup snippet still imports `dataset`, `triplet_sum1` and
    `triplet_sum2` from __main__, but the scrambled module defines the fixture
    as `A` and both solvers as `lowercase_` — those module-level names must be
    restored before this benchmark can actually run.
    """
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code_naive = "\ntriplet_sum1(*dataset)\n"
    test_code_two_pointer = "\ntriplet_sum2(*dataset)\n"
    times_naive = repeat(setup=setup_code, stmt=test_code_naive, repeat=5, number=10000)
    times_two_pointer = repeat(setup=setup_code, stmt=test_code_two_pointer, repeat=5, number=10000)
    return (min(times_naive), min(times_two_pointer))
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Was `A : int = solution_times()` followed by reads of `times`: both names
    # were dangling after scrambling. The benchmark helper is the last
    # `lowercase_` definition above.
    times = lowercase_()
    print(f'The time for naive implementation is {times[0]}.')
    print(f'The time for optimized implementation is {times[1]}.')
| 184 |
class lowercase:
    """A named item with a value and a weight (an entry for the greedy selector below).

    NOTE(review): the scrambling collapsed every accessor into one name `a` (so
    only the last survived) and gave the constructor duplicate parameter names
    (a SyntaxError). Accessor names restored from the `get_weight`/`get_value`
    call sites in the greedy function.
    """

    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        # value density, the natural greedy sort key
        return self.value / self.weight
def __lowerCamelCase(name, value, weight):
    """Build a list of `lowercase` items from parallel name/value/weight lists.

    Fixes: the original reused one name for all three parameters (a SyntaxError)
    and constructed `Things`, a class that does not exist in this file — the
    item class defined above is `lowercase`.
    """
    menu = []
    for i in range(len(name)):
        menu.append(lowercase(name[i], value[i], weight[i]))
    return menu
def __lowerCamelCase(items, max_cost, key_func):
    """Greedy knapsack: take items in descending `key_func` order while they fit.

    Returns (chosen_items, total_value). The scrambled version reused one
    parameter name (a SyntaxError) and read the undefined locals `items_copy`,
    `total_cost` and `total_value`; all reconstructed from the loop body.
    """
    items_copy = sorted(items, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for item in items_copy:
        if (total_cost + item.get_weight()) <= max_cost:
            result.append(item)
            total_cost += item.get_weight()
            total_value += item.get_value()
    return (result, total_value)
def __lowerCamelCase():
    '''simple docstring'''
    # NOTE(review): the body was lost in scrambling — only the docstring
    # remains, so calling this does nothing and returns None.


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 285 | 0 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCAmelCase__(lowercase_):
    """Dataset reader that builds a `datasets` Dataset from a Spark DataFrame.

    NOTE(review): parameter names/order reconstructed from the keyword arguments
    they are forwarded with and from the default values; the original reused one
    parameter name throughout (a SyntaxError) and bound the builder/flags to
    locals even though the read method consults them via ``self``.
    """

    def __init__(
        self,
        df,
        split=None,
        features=None,
        streaming=True,
        cache_dir=None,
        keep_in_memory=False,
        working_dir=None,
        load_from_cache_file=True,
        file_format="arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        # The read method below reads these, so they must be instance attributes.
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def lowerCAmelCase__(self):
        """Materialize (or stream) the dataset for the configured split."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
| 234 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __lowerCamelCase(example):
    """Tokenize one dataset row and record the chars-per-token compression ratio.

    Relies on the module-level `tokenizer`. The scrambled version created an
    `output` dict, never populated it, and then read `output['input_ids']`
    (a KeyError); restored from the reads.
    """
    output = {}
    # assumes truncation was disabled in the original script — TODO confirm
    output['input_ids'] = tokenizer(example['content'], truncation=False)['input_ids']
    output['ratio_char_token'] = len(example['content']) / len(output['input_ids'])
    return output
# --- driver: parse args, tokenize the dataset in parallel, push to the hub ---
# NOTE(review): every module-level name here was scrambled to `_UpperCAmelCase`
# while later lines still referenced `parser`, `args`, `tokenizer`, `t_start`
# and `ds`; the real names are restored from those references.
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f'Dataset loaded in {time.time()-t_start:.2f}s')

t_start = time.time()
ds = ds.map(
    __lowerCamelCase,  # the per-example tokenize function above (was the dangling name `tokenize`)
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f'Dataset tokenized in {time.time()-t_start:.2f}s')

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'Data pushed to the hub in {time.time()-t_start:.2f}s')
| 285 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowercase_ = logging.get_logger(__name__)
# General docstring
lowercase_ = """RegNetConfig"""
# Base docstring
lowercase_ = """facebook/regnet-y-040"""
lowercase_ = [1, 1_0_8_8, 7, 7]
# Image classification docstring
lowercase_ = """facebook/regnet-y-040"""
lowercase_ = """tabby, tabby cat"""
lowercase_ = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    """Conv2d -> BatchNorm2d -> optional activation: the basic RegNet building block.

    NOTE(review): class name restored from the `RegNetConvLayer` call sites in
    this file; parameters were collapsed into one name (a SyntaxError) and the
    submodules were bound to locals instead of ``self``.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, groups=1, activation="relu"):
        super().__init__()
        self.convolution = nn.Conv2d(  # was the mangled `nn.Convad`
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,  # "same" padding for odd kernels
            groups=groups,
            bias=False,  # the batch norm supplies the affine shift
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        return self.activation(hidden_state)
class RegNetEmbeddings(nn.Module):
    """Stem: one stride-2 3x3 conv from `config.num_channels` to the embedding size."""

    def __init__(self, config):
        super().__init__()
        # forward() reads self.embedder / self.num_channels; the original bound locals.
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.'
            )
        return self.embedder(pixel_values)
class RegNetShortCut(nn.Module):
    """1x1 strided conv + batch norm projecting residual inputs to the right shape."""

    def __init__(self, in_channels, out_channels, stride=2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        return self.normalization(hidden_state)
class RegNetSELayer(nn.Module):
    """Squeeze-and-excitation: global pool -> bottleneck 1x1 convs -> sigmoid channel gates."""

    def __init__(self, in_channels, reduced_channels):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))  # was the mangled nn.AdaptiveAvgPoolad
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        # gate each channel of the input by its learned attention weight
        return hidden_state * attention
class RegNetXLayer(nn.Module):
    """ResNeXt-style residual block: 1x1 -> grouped 3x3 -> 1x1, plus a shortcut."""

    def __init__(self, config, in_channels, out_channels, stride=1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            # no activation before the residual add
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        return self.activation(hidden_state)
class RegNetYLayer(nn.Module):
    """RegNet-Y residual block: an X block with a squeeze-and-excitation stage."""

    def __init__(self, config, in_channels, out_channels, stride=1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            # no activation before the residual add
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        return self.activation(hidden_state)
class RegNetStage(nn.Module):
    """`depth` stacked X/Y layers; the first one downsamples with `stride`."""

    def __init__(self, config, in_channels, out_channels, stride=2, depth=2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
        # forward() reads self.layers; the original bound the Sequential to a local.
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        return self.layers(hidden_state)
class RegNetEncoder(nn.Module):
    """Runs the input through every RegNet stage, optionally collecting hidden states."""

    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(lowercase_):
    """Weight initialization and gradient-checkpointing plumbing shared by RegNet models.

    NOTE(review): the scrambling collapsed four class attributes into one `_a`
    and both methods into `A__`; names reconstructed from the assigned values
    and HF conventions — confirm against the base class.
    """

    config_class = RegNetConfig
    base_model_prefix = 'regnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):  # was the mangled `nn.Convad`
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # was `_lowercase =value`: bound a throwaway local and had no effect
        if isinstance(module, RegNetEncoder):
            module.gradient_checkpointing = value
lowercase_ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowercase_ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """The bare RegNet model outputting raw features without any specific head on top.""" , lowercase_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(lowercase_):
    """Bare RegNet: embeddings -> encoder -> adaptive average pooling, no task head.

    NOTE(review): restored the ``self.*`` attribute bindings the forward method
    reads (the original used locals) and renamed forward from `A__` so
    nn.Module.__call__ dispatches to it.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))  # was the mangled nn.AdaptiveAvgPoolad
        # Initialize weights and apply final processing
        self.post_init()

    # NOTE(review): all docstring constants in this file were scrambled to
    # `lowercase_`; the decorator argument resolves to whichever assignment ran last.
    @add_start_docstrings_to_model_forward(lowercase_)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """ , lowercase_ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(lowercase_):
    """RegNet with a linear classification head and the standard HF loss selection.

    NOTE(review): restored the ``self.*`` attribute bindings and the writes to
    ``self.config.problem_type`` that the branch tests below rely on.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    # NOTE(review): the inputs-docstring constant was scrambled to `lowercase_`.
    @add_start_docstrings_to_model_forward(lowercase_)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # Infer the problem type once from the label count/dtype and cache it.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 205 |
def __lowerCamelCase(edge):
    """Surface area of a regular dodecahedron: A = 3*sqrt(25 + 10*sqrt(5)) * edge^2.

    Raises ValueError for non-numeric or non-positive edges. Fixes: the
    scrambled guard was ``isinstance(edge, edge)`` (always a TypeError), and the
    numeric comparison ran before the type check, so non-numeric input raised
    TypeError instead of the intended ValueError.
    """
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('Length must be a positive.')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def __lowerCamelCase(edge):
    """Volume of a regular dodecahedron: V = (15 + 7*sqrt(5))/4 * edge^3.

    Raises ValueError for non-numeric or non-positive edges (same guard fix as
    the surface-area helper above: type check first, real type tuple).
    """
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('Length must be a positive.')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 285 | 0 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ = logging.get_logger(__name__)
a_ = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
a_ = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
a_ = {"""facebook/blenderbot-3B""": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Map every byte 0-255 to a printable unicode character for byte-level BPE.

    Printable bytes map to themselves; the rest are shifted into the 256+ range
    so no byte maps to whitespace/control characters the BPE code chokes on.
    Fixes: the scrambled version bound `bs`, `cs` and `n` to one name, appended
    undefined values, and its name (`_a`) did not match the `bytes_to_unicode()`
    call in the tokenizer's __init__.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols).

    Fixes: the scrambled version bound `pairs` and `prev_char` to one name
    (NameError on first use) and its name (`_a`) did not match the
    `get_pairs(...)` calls in the BPE method below.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class lowercase__ ( lowercase_ ):
a_ =VOCAB_FILES_NAMES
a_ =PRETRAINED_VOCAB_FILES_MAP
a_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ =['''input_ids''', '''attention_mask''']
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="replace" , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=False , **__UpperCAmelCase , )-> str:
'''simple docstring'''
lowerCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else bos_token
lowerCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token
lowerCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else sep_token
lowerCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cls_token
lowerCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token
lowerCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
with open(__UpperCAmelCase , encoding="utf-8" ) as vocab_handle:
lowerCAmelCase__ = json.load(__UpperCAmelCase )
lowerCAmelCase__ = {v: k for k, v in self.encoder.items()}
lowerCAmelCase__ = errors # how to handle errors in decoding
lowerCAmelCase__ = bytes_to_unicode()
lowerCAmelCase__ = {v: k for k, v in self.byte_encoder.items()}
with open(__UpperCAmelCase , encoding="utf-8" ) as merges_handle:
lowerCAmelCase__ = merges_handle.read().split("\n" )[1:-1]
lowerCAmelCase__ = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
lowerCAmelCase__ = {}
lowerCAmelCase__ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase__ = re.compile(R"\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def UpperCAmelCase ( self )-> List[Any]:
        '''Number of entries in the base vocabulary (excludes added tokens).'''
        # NOTE(review): several methods of this class carry the same scrambled
        # name `UpperCAmelCase`, so later definitions shadow this property.
        return len(self.encoder )
    def UpperCAmelCase ( self )-> Any:
        '''Return the full vocabulary: base encoder merged with any added tokens.'''
        return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase ( self , __UpperCAmelCase )-> str:
    """Apply byte-level BPE merges to a single (byte-encoded) token.

    Fix(review): the original bound every intermediate value to the collapsed name
    ``lowerCAmelCase__`` while the loop body read ``word``/``pairs``/``bigram``/
    ``new_word``/``i``/``j`` (all undefined), and the memoization cache was never
    written. Reconstructed from the standard GPT-2/RoBERTa BPE implementation.

    Args:
        __UpperCAmelCase: the token to merge (string of byte-mapped characters).
    Returns:
        The space-joined sequence of merged sub-tokens.
    """
    token = __UpperCAmelCase
    if token in self.cache:
        return self.cache[token]
    word = tuple(token)
    pairs = get_pairs(word)
    if not pairs:
        return token
    while True:
        # Pick the merge with the lowest rank; unseen pairs rank as +inf.
        bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float("inf") ) )
        if bigram not in self.bpe_ranks:
            break
        first, second = bigram
        new_word = []
        i = 0
        while i < len(word):
            try:
                j = word.index(first , i)
            except ValueError:
                new_word.extend(word[i:])
                break
            else:
                new_word.extend(word[i:j])
                i = j
            if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
        if len(word) == 1:
            break
        else:
            pairs = get_pairs(word)
    word = " ".join(word)
    # Memoize so repeated tokens are merged only once.
    self.cache[token] = word
    return word
def UpperCAmelCase ( self , __UpperCAmelCase )-> List[str]:
    """Tokenize a string into byte-level BPE sub-tokens.

    Fix(review): the original never defined ``bpe_tokens`` (the accumulator was
    bound to the collapsed name ``lowerCAmelCase__``) and called ``self.bpe`` on
    the whole input string instead of the byte-encoded regex match.
    """
    bpe_tokens = []
    for token in re.findall(self.pat , __UpperCAmelCase):
        # Map every UTF-8 byte to a printable unicode char so BPE never sees raw
        # bytes (avoids control tokens / spaces inside the merge alphabet).
        token = "".join(self.byte_encoder[b] for b in token.encode("utf-8") )
        bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ") )
    return bpe_tokens
def UpperCAmelCase ( self , __UpperCAmelCase )-> int:
    """Look up the id for a token, defaulting to the unk token's id."""
    unk_id = self.encoder.get(self.unk_token)
    return self.encoder.get(__UpperCAmelCase , unk_id)
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
    """Look up the token string for an id (None when the id is unknown)."""
    id_to_token_map = self.decoder
    return id_to_token_map.get(__UpperCAmelCase)
def UpperCAmelCase ( self , __UpperCAmelCase )-> str:
    """Convert a sequence of BPE tokens back into a decoded string.

    Fix(review): the original bound the joined string to the collapsed name
    ``lowerCAmelCase__`` and then read an undefined ``text``; the return
    annotation was also wrong (``int``).
    """
    text = "".join(__UpperCAmelCase)
    # Map placeholder unicode chars back to raw bytes, then decode as UTF-8.
    text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors)
    return text
def UpperCAmelCase ( self , save_directory , filename_prefix = None )-> Optional[Tuple[str, str]]:
    """Write the encoder vocab (JSON) and BPE merges to *save_directory*.

    Fix(review): the original declared both parameters as ``__UpperCAmelCase``
    (a SyntaxError), bound the computed paths to a collapsed local while the body
    read undefined ``save_directory``/``filename_prefix``/``vocab_file``/
    ``merge_file``/``index``, and the sort key ``lambda __UpperCAmelCase: kv[1]``
    referenced an undefined ``kv``.

    Returns:
        ``(vocab_file, merge_file)`` paths, or ``None`` if *save_directory* is
        not a directory (an error is logged in that case).
    """
    if not os.path.isdir(save_directory):
        logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
        return
    vocab_file = os.path.join(
        save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
    merge_file = os.path.join(
        save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
    with open(vocab_file , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
    index = 0
    with open(merge_file , "w" , encoding="utf-8" ) as writer:
        writer.write("#version: 0.2\n" )
        # Merges must be written in rank order; warn if ranks are not contiguous.
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
            if index != token_index:
                logger.warning(
                    F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                    " Please check that the tokenizer is not corrupted!" )
                index = token_index
            writer.write(" ".join(bpe_tokens) + "\n" )
            index += 1
    return vocab_file, merge_file
def UpperCAmelCase ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False )-> List[int]:
    """Return a mask marking special tokens (1) vs. sequence tokens (0).

    Fix(review): the original declared all three parameters (and the forwarded
    keyword arguments) with the same name — a SyntaxError — and then read an
    undefined ``token_ids_a``.
    """
    if already_has_special_tokens:
        # Defer to the base implementation when ids already contain specials.
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
    if token_ids_1 is None:
        return [1] + ([0] * len(token_ids_0)) + [1]
    return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def UpperCAmelCase ( self , token_ids_0 , token_ids_1 = None )-> List[int]:
    """Return all-zero token type ids sized for the sequence(s) with specials.

    Fix(review): the original declared both parameters as ``__UpperCAmelCase``
    (a SyntaxError) and read an undefined ``token_ids_a``.
    """
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    # Pair format: <cls> A <sep><sep> B <sep>, all segment 0.
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def UpperCAmelCase ( self , text , is_split_into_words=False , **kwargs )-> Tuple[str, dict]:
    """Optionally prepend a space before tokenization (byte-level BPE convention).

    Fix(review): the original declared the positional parameters and ``**kwargs``
    all as ``__UpperCAmelCase`` (a SyntaxError) while the body read ``text``/
    ``is_split_into_words``/``kwargs``.
    """
    add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space)
    if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
        text = " " + text
    return (text, kwargs)
def UpperCAmelCase ( self , token_ids_0 , token_ids_1 = None )-> List[int]:
    """Append the EOS token to the first sequence (Blenderbot-style inputs).

    Fix(review): the original declared both parameters as ``__UpperCAmelCase``
    (a SyntaxError). Note the second sequence is intentionally ignored.
    """
    return token_ids_0 + [self.eos_token_id]
def UpperCAmelCase ( self , __UpperCAmelCase )-> List[int]:
    """Encode a Conversation object into model input ids, trimming to the max length.

    Fix(review): the original bound the accumulator and intermediates to the
    collapsed name ``lowerCAmelCase__`` while the body read undefined ``inputs``/
    ``full_string``/``input_ids``.
    """
    inputs = []
    for is_user, text in __UpperCAmelCase.iter_texts():
        if is_user:
            # We need to space prefix as it's being done within blenderbot
            inputs.append(" " + text)
        else:
            # Generated responses should contain them already.
            inputs.append(text)
    full_string = " ".join(inputs)
    input_ids = self.encode(full_string)
    if len(input_ids) > self.model_max_length:
        # Keep the most recent tokens when the conversation is too long.
        input_ids = input_ids[-self.model_max_length :]
        logger.warning(F"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
    return input_ids
| 340 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class lowercase ( unittest.TestCase ):
def a ( self ):
snake_case_ = tempfile.mkdtemp()
snake_case_ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'的',
'价',
'格',
'是',
'15',
'便',
'alex',
'##andra',
',',
'。',
'-',
't',
'shirt',
]
snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
snake_case_ = {
'do_resize': True,
'size': {'height': 224, 'width': 224},
'do_center_crop': True,
'crop_size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'do_convert_rgb': True,
}
snake_case_ = os.path.join(self.tmpdirname , snake_case )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(snake_case , snake_case )
def a ( self , **snake_case ):
return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case )
def a ( self , **snake_case ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case )
def a ( self , **snake_case ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **snake_case )
def a ( self ):
shutil.rmtree(self.tmpdirname )
def a ( self ):
snake_case_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
snake_case_ = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a ( self ):
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = self.get_image_processor()
snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
processor_slow.save_pretrained(self.tmpdirname )
snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case )
snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
processor_fast.save_pretrained(self.tmpdirname )
snake_case_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , snake_case )
self.assertIsInstance(processor_fast.tokenizer , snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , snake_case )
self.assertIsInstance(processor_fast.image_processor , snake_case )
def a ( self ):
snake_case_ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case_ = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' )
snake_case_ = self.get_image_processor(do_normalize=snake_case )
snake_case_ = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=snake_case )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case )
def a ( self ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
snake_case_ = self.prepare_image_inputs()
snake_case_ = image_processor(snake_case , return_tensors='np' )
snake_case_ = processor(images=snake_case , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a ( self ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
snake_case_ = 'Alexandra,T-shirt的价格是15便士。'
snake_case_ = processor(text=snake_case )
snake_case_ = tokenizer(snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a ( self ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
snake_case_ = 'Alexandra,T-shirt的价格是15便士。'
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=snake_case , images=snake_case )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(snake_case ):
processor()
def a ( self ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ = processor.batch_decode(snake_case )
snake_case_ = tokenizer.batch_decode(snake_case )
self.assertListEqual(snake_case , snake_case )
def a ( self ):
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = ChineseCLIPProcessor(tokenizer=snake_case , image_processor=snake_case )
snake_case_ = 'Alexandra,T-shirt的价格是15便士。'
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=snake_case , images=snake_case )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 285 | 0 |
def lowerCAmelCase_ ( data ):
    """Encode *data* (bytes-like) as an uppercase base16 (hex) string.

    Fix(review): the original body referenced the undefined name
    ``UpperCamelCase__`` both for the data argument and — instead of the loop
    variable ``byte`` — inside ``hex()``, so it raised NameError on any call.

    >>> lowerCAmelCase_(b"Hello")
    '48656C6C6F'
    """
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
def lowerCAmelCase_ ( data ):
    """Decode an uppercase base16 (hex) string back into bytes.

    Fix(review): the original body referenced the undefined names
    ``UpperCamelCase__`` and ``data`` while its parameter was ``__lowerCamelCase``,
    so it raised NameError on any call.

    Raises:
        ValueError: if *data* has an odd length or contains characters outside
            the uppercase base16 alphabet (RFC 3548 section 6).
    """
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 1_6 ) for i in range(0 , len(data) , 2 ) )
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings when executed directly.
    import doctest

    doctest.testmod()
| 123 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase ( lowercase_ ):
    """Abstract base class for CLI subcommands.

    NOTE(review): both abstract methods below are named ``a``, so the second
    definition shadows the first — presumably the originals were a parser
    registration hook and a ``run`` method; restore distinct names before use.
    The base class ``lowercase_`` is defined elsewhere in this file.
    """

    @staticmethod
    @abstractmethod
    def a ( snake_case ):
        # Register this subcommand on the given ArgumentParser; must be overridden.
        raise NotImplementedError()

    @abstractmethod
    def a ( self ):
        # Execute the subcommand; must be overridden.
        raise NotImplementedError()
| 285 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# The slow (SentencePiece-based) tokenizer is optional; fall back to None when
# the sentencepiece backend is not installed.
if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    lowerCamelCase_ : List[str] = None

# Module logger.
lowerCamelCase_ : List[str] = logging.get_logger(__name__)
# NOTE(review): the constants below are all bound to the same identifier
# ``lowerCamelCase_``, so each assignment overwrites the previous one; the class
# body later references VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, which are never defined here — the
# original constant names were presumably lost in a mechanical rename.
lowerCamelCase_ : int = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase_ : Optional[Any] = {
    'vocab_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
        'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
    },
}
lowerCamelCase_ : int = {
    'google/fnet-base': 5_12,
    'google/fnet-large': 5_12,
}
# SentencePiece word-boundary marker used by the slow tokenizer.
lowerCamelCase_ : List[str] = '▁'
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """Fast FNet tokenizer backed by the HuggingFace *tokenizers* library.

    Fix(review): the original block was machine-mangled — every ``__init__``
    parameter was named ``snake_case_`` (a SyntaxError), all five class
    attributes shared the name ``lowercase_`` (so only the last survived), the
    three methods shared the name ``lowerCamelCase_`` (so only the last
    survived), and attribute assignments were bound to a local ``A_`` instead of
    ``self``. Names below are restored to the conventional tokenizer API that
    the fast-tokenizer base class dispatches on.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Slow-tokenizer conversion is only possible when a SentencePiece file exists.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]`` inputs."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for ``[CLS] A [SEP]``, 1 for ``B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the SentencePiece model into *save_directory*; returns the path."""
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        # Only copy when source and destination differ.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding: ``_import_structure`` maps each submodule to the list
# of public names it exports. Optional backends (sentencepiece / tokenizers /
# torch / tf) are only registered when available.
# Fix(review): the original assigned every piece to the same throwaway variable
# ``lowerCamelCase_`` (losing all but the last) and then passed an undefined
# ``_import_structure`` to ``_LazyModule`` (NameError); the lazy proxy was also
# bound to a variable instead of being installed in ``sys.modules``.
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_xlnet'] = [
        'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XLNetForMultipleChoice',
        'XLNetForQuestionAnswering',
        'XLNetForQuestionAnsweringSimple',
        'XLNetForSequenceClassification',
        'XLNetForTokenClassification',
        'XLNetLMHeadModel',
        'XLNetModel',
        'XLNetPreTrainedModel',
        'load_tf_weights_in_xlnet',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_xlnet'] = [
        'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFXLNetForMultipleChoice',
        'TFXLNetForQuestionAnsweringSimple',
        'TFXLNetForSequenceClassification',
        'TFXLNetForTokenClassification',
        'TFXLNetLMHeadModel',
        'TFXLNetMainLayer',
        'TFXLNetModel',
        'TFXLNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime they stay lazy.
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
# Module logger.
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
# General docstring
lowerCamelCase_ : Dict = 'PoolFormerConfig'
# Base docstring
lowerCamelCase_ : Any = 'sail/poolformer_s12'
lowerCamelCase_ : List[str] = [1, 5_12, 7, 7]
# Image classification docstring
lowerCamelCase_ : List[str] = 'sail/poolformer_s12'
lowerCamelCase_ : Any = 'tabby, tabby cat'
# NOTE(review): every constant above is bound to the same identifier, so each
# assignment overwrites the previous one; the docstring decorators below
# reference _CHECKPOINT_FOR_DOC / _CONFIG_FOR_DOC / _EXPECTED_OUTPUT_SHAPE /
# _IMAGE_CLASS_CHECKPOINT / _IMAGE_CLASS_EXPECTED_OUTPUT, which are never
# defined here — the original constant names were presumably lost.
lowerCamelCase_ : Union[str, Any] = [
    'sail/poolformer_s12',
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def UpperCAmelCase__ ( input , drop_prob = 0.0 , training = False ):
    """Drop entire residual paths per sample (stochastic depth).

    Fix(review): the original signature named all three parameters
    ``_UpperCAmelCase`` (a SyntaxError) while the body already read
    ``input``/``drop_prob``/``training``; the random tensor shape and the
    divisor were likewise replaced by the undefined collapsed name.

    Args:
        input: activation tensor; the first dimension is the batch.
        drop_prob: probability of zeroing a sample's residual path.
        training: paths are only dropped in training mode.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape , dtype=input.dtype , device=input.device )
    random_tensor.floor_()  # binarize
    # Scale survivors by 1/keep_prob so the expected activation is unchanged.
    output = input.div(keep_prob ) * random_tensor
    return output
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case_ = None ):
"""simple docstring"""
super().__init__()
A_ : Tuple = drop_prob
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
return drop_path(snake_case_ , self.drop_prob , self.training )
def lowerCamelCase_ ( self ):
"""simple docstring"""
return "p={}".format(self.drop_prob )
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=None ):
"""simple docstring"""
super().__init__()
A_ : Dict = patch_size if isinstance(snake_case_ , collections.abc.Iterable ) else (patch_size, patch_size)
A_ : Tuple = stride if isinstance(snake_case_ , collections.abc.Iterable ) else (stride, stride)
A_ : Dict = padding if isinstance(snake_case_ , collections.abc.Iterable ) else (padding, padding)
A_ : Dict = nn.Convad(snake_case_ , snake_case_ , kernel_size=snake_case_ , stride=snake_case_ , padding=snake_case_ )
A_ : Optional[int] = norm_layer(snake_case_ ) if norm_layer else nn.Identity()
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Union[str, Any] = self.projection(snake_case_ )
A_ : List[Any] = self.norm(snake_case_ )
return embeddings
class _UpperCAmelCase ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self , snake_case_ , **snake_case_ ):
"""simple docstring"""
super().__init__(1 , snake_case_ , **snake_case_ )
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case_ ):
"""simple docstring"""
super().__init__()
A_ : Any = nn.AvgPoolad(snake_case_ , stride=1 , padding=pool_size // 2 , count_include_pad=snake_case_ )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
return self.pool(snake_case_ ) - hidden_states
class _UpperCAmelCase ( nn.Module ):
    """PoolFormer MLP block: 1x1 conv -> activation -> dropout -> 1x1 conv -> dropout.

    NOTE(review): this block appears machine-mangled and cannot run as written:
    the ``__init__`` parameters are all named ``snake_case_`` (a SyntaxError),
    the layers are bound to a local ``A_`` instead of attributes, and the
    forward method reads ``self.conva`` twice — the two distinct convolutions
    were presumably ``self.conv1`` / ``self.conv2`` before the rename. Restore
    distinct parameter and attribute names before use.
    """

    def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        super().__init__()
        # 1x1 convolutions acting as the channel MLP.
        A_ : Optional[Any] = nn.Convad(snake_case_ , snake_case_ , 1 )
        A_ : int = nn.Convad(snake_case_ , snake_case_ , 1 )
        A_ : List[Any] = PoolFormerDropPath(snake_case_ )
        # Resolve the activation either from the ACT2FN registry (by name) or
        # use the callable provided on the config directly.
        if isinstance(config.hidden_act , snake_case_ ):
            A_ : Optional[int] = ACTaFN[config.hidden_act]
        else:
            A_ : str = config.hidden_act

    def lowerCamelCase_( self , snake_case_ ):
        A_ : Any = self.conva(snake_case_ )
        A_ : Tuple = self.act_fn(snake_case_ )
        A_ : Optional[int] = self.drop(snake_case_ )
        A_ : List[str] = self.conva(snake_case_ )
        A_ : Union[str, Any] = self.drop(snake_case_ )
        return hidden_states
class _UpperCAmelCase ( nn.Module ):
    """One PoolFormer layer: pooling mixer + MLP, each with norm, residual, drop path.

    NOTE(review): this block appears machine-mangled and cannot run as written:
    the ``__init__`` parameters are all named ``snake_case_`` (a SyntaxError),
    submodules are bound to a local ``A_`` while the forward method reads
    ``self.pooling`` / ``self.output`` / ``self.before_norm`` /
    ``self.after_norm`` / ``self.drop_path`` / ``self.layer_scale_a`` (the two
    distinct layer-scale parameters were collapsed to one name), and the
    referenced classes ``PoolFormerPooling`` / ``PoolFormerOutput`` /
    ``PoolFormerGroupNorm`` / ``PoolFormerDropPath`` are never defined under
    those names in this file. Restore the original identifiers before use.
    """

    def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        super().__init__()
        A_ : Any = PoolFormerPooling(snake_case_ )
        A_ : str = PoolFormerOutput(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        A_ : str = PoolFormerGroupNorm(snake_case_ )
        A_ : List[Any] = PoolFormerGroupNorm(snake_case_ )
        # Useful for training neural nets
        A_ : Any = PoolFormerDropPath(snake_case_ ) if drop_path > 0.0 else nn.Identity()
        A_ : Dict = config.use_layer_scale
        if config.use_layer_scale:
            # Learnable per-channel scales applied to each residual branch.
            A_ : int = nn.Parameter(
                config.layer_scale_init_value * torch.ones((snake_case_) ) , requires_grad=snake_case_ )
            A_ : int = nn.Parameter(
                config.layer_scale_init_value * torch.ones((snake_case_) ) , requires_grad=snake_case_ )

    def lowerCamelCase_( self , snake_case_ ):
        if self.use_layer_scale:
            # Branch 1: pooling mixer with layer scale.
            A_ : Any = self.pooling(self.before_norm(snake_case_ ) )
            A_ : Any = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
            # First residual connection
            A_ : Dict = hidden_states + self.drop_path(snake_case_ )
            A_ : Optional[int] = ()
            # Branch 2: channel MLP with layer scale.
            A_ : List[Any] = self.output(self.after_norm(snake_case_ ) )
            A_ : str = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
            # Second residual connection
            A_ : List[Any] = hidden_states + self.drop_path(snake_case_ )
            A_ : Tuple = (output,) + outputs
            return outputs
        else:
            A_ : Tuple = self.drop_path(self.pooling(self.before_norm(snake_case_ ) ) )
            # First residual connection
            A_ : int = pooling_output + hidden_states
            A_ : Any = ()
            # Second residual connection inside the PoolFormerOutput block
            A_ : List[Any] = self.drop_path(self.output(self.after_norm(snake_case_ ) ) )
            A_ : str = hidden_states + layer_output
            A_ : str = (output,) + outputs
            return outputs
class _UpperCAmelCase ( nn.Module ):
    """Stack of patch-embedding stages and PoolFormer layer blocks.

    NOTE(review): this block appears machine-mangled and cannot run as written:
    locals are bound to the collapsed name ``A_`` while later code reads
    ``embeddings`` / ``blocks`` / ``dpr`` / ``cur`` / ``self.patch_embeddings`` /
    ``self.block`` / ``self.config``, and the referenced classes
    ``PoolFormerEmbeddings`` / ``PoolFormerLayer`` are never defined under
    those names in this file. Restore the original identifiers before use.
    """

    def __init__( self , snake_case_ ):
        super().__init__()
        A_ : List[str] = config
        # stochastic depth decay rule: linearly increasing drop-path rate per layer
        A_ : List[str] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
        # patch embeddings: one downsampling stage per encoder block
        A_ : Optional[Any] = []
        for i in range(config.num_encoder_blocks ):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
        A_ : int = nn.ModuleList(snake_case_ )
        # Transformer blocks
        A_ : Optional[int] = []
        A_ : Tuple = 0
        for i in range(config.num_encoder_blocks ):
            # each block consists of layers
            A_ : str = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i] ):
                layers.append(
                    PoolFormerLayer(
                        snake_case_ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
            blocks.append(nn.ModuleList(snake_case_ ) )
        A_ : Dict = nn.ModuleList(snake_case_ )

    def lowerCamelCase_( self , snake_case_ , snake_case_=False , snake_case_=True ):
        # Optionally collect hidden states after every stage.
        A_ : str = () if output_hidden_states else None
        A_ : int = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
            A_ , A_ : Any = layers
            # Get patch embeddings from hidden_states
            A_ : Tuple = embedding_layer(snake_case_ )
            # Send the embeddings through the blocks
            for _, blk in enumerate(snake_case_ ):
                A_ : List[str] = blk(snake_case_ )
                A_ : Any = layer_outputs[0]
            if output_hidden_states:
                A_ : List[Any] = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=snake_case_ , hidden_states=snake_case_ )
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """Base class handling weight init and config for PoolFormer models.

    NOTE(review): this block appears machine-mangled: all four class attributes
    share the name ``lowercase_`` (only the last assignment survives — the
    originals were presumably ``config_class`` / ``base_model_prefix`` /
    ``main_input_name`` / ``supports_gradient_checkpointing``), and the second
    method's two positional parameters are both named ``snake_case_`` (a
    SyntaxError). Restore distinct names before use.
    """

    lowercase_ : Any = PoolFormerConfig
    lowercase_ : List[str] = """poolformer"""
    lowercase_ : Union[str, Any] = """pixel_values"""
    lowercase_ : int = True

    def lowerCamelCase_( self , snake_case_ ):
        # Standard truncated-normal init for conv/linear, unit-affine for LayerNorm.
        if isinstance(snake_case_ , (nn.Linear, nn.Convad) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(snake_case_ , nn.LayerNorm ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )

    def lowerCamelCase_( self , snake_case_ , snake_case_=False ):
        # Toggle gradient checkpointing on the encoder.
        if isinstance(snake_case_ , snake_case_ ):
            A_ : Optional[int] = value
lowerCamelCase_ : Optional[Any] = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCamelCase_ : Any = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
    """The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , UpperCAmelCase__ , )
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """Bare PoolFormer backbone: encoder only, no task head.

    NOTE(review): this block appears machine-mangled and cannot run as written:
    the encoder is bound to a local ``A_`` (so ``self.encoder`` is never set),
    the forward method's three keyword parameters are all named ``snake_case_``
    (a SyntaxError), ``PoolFormerEncoder`` is never defined under that name in
    this file, and the docstring decorators reference undefined
    ``_CHECKPOINT_FOR_DOC`` / ``_CONFIG_FOR_DOC`` / ``_EXPECTED_OUTPUT_SHAPE``
    constants. Restore the original identifiers before use.
    """

    def __init__( self , snake_case_ ):
        super().__init__(snake_case_ )
        A_ : Optional[int] = config
        A_ : List[Any] = PoolFormerEncoder(snake_case_ )
        # Initialize weights and apply final processing
        self.post_init()

    def lowerCamelCase_( self ):
        # Expose the first-stage patch embedding (input embedding analogue).
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(snake_case_ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def lowerCamelCase_( self , snake_case_ = None , snake_case_ = None , snake_case_ = None , ):
        A_ : Any = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        A_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values' )
        A_ : List[Any] = self.encoder(
            snake_case_ , output_hidden_states=snake_case_ , return_dict=snake_case_ , )
        A_ : int = encoder_outputs[0]
        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=snake_case_ , hidden_states=encoder_outputs.hidden_states , )
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case_ ):
"""simple docstring"""
super().__init__()
A_ : Any = nn.Linear(config.hidden_size , config.hidden_size )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Dict = self.dense(snake_case_ )
return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """ , UpperCAmelCase__ , )
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """PoolFormer backbone plus a linear image-classification head.

    NOTE(review): this block appears machine-mangled and cannot run as written:
    submodules are bound to a local ``A_`` while the forward method reads
    ``self.poolformer`` / ``self.norm`` / ``self.classifier`` /
    ``self.num_labels``, the forward method's keyword parameters are all named
    ``snake_case_`` (a SyntaxError), ``PoolFormerModel`` is never defined under
    that name in this file, and the docstring decorators reference undefined
    ``_IMAGE_CLASS_CHECKPOINT`` / ``_CONFIG_FOR_DOC`` /
    ``_IMAGE_CLASS_EXPECTED_OUTPUT`` constants. Restore the original
    identifiers before use.
    """

    def __init__( self , snake_case_ ):
        super().__init__(snake_case_ )
        A_ : Dict = config.num_labels
        A_ : str = PoolFormerModel(snake_case_ )
        # Final norm
        A_ : Any = PoolFormerGroupNorm(config.hidden_sizes[-1] )
        # Classifier head
        A_ : List[str] = (
            nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(snake_case_ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def lowerCamelCase_( self , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , ):
        A_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
        A_ : List[Any] = self.poolformer(
            snake_case_ , output_hidden_states=snake_case_ , return_dict=snake_case_ , )
        A_ : Optional[int] = outputs[0]
        # Global average pool over spatial dims, then classify.
        A_ : Optional[int] = self.classifier(self.norm(snake_case_ ).mean([-2, -1] ) )
        A_ : Optional[Any] = None
        if labels is not None:
            # Infer the problem type from label dtype / label count, mirroring
            # the standard transformers classification-head convention.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    A_ : Any = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    A_ : Tuple = 'single_label_classification'
                else:
                    A_ : str = 'multi_label_classification'
            if self.config.problem_type == "regression":
                A_ : int = MSELoss()
                if self.num_labels == 1:
                    A_ : int = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    A_ : List[str] = loss_fct(snake_case_ , snake_case_ )
            elif self.config.problem_type == "single_label_classification":
                A_ : str = CrossEntropyLoss()
                A_ : Any = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                A_ : Any = BCEWithLogitsLoss()
                A_ : Any = loss_fct(snake_case_ , snake_case_ )
        if not return_dict:
            A_ : Optional[int] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=snake_case_ , logits=snake_case_ , hidden_states=outputs.hidden_states )
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class _UpperCAmelCase(UpperCAmelCase__):
    """Minimal one-step pipeline: a single denoising step on random noise."""

    def __init__(self, unet, scheduler):
        # The body registers the modules under these names, so the obfuscated
        # duplicate parameters are restored to `unet` / `scheduler`.
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # Sample noise matching the UNet's expected input shape.
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), )
        timestep = 1
        model_output = self.unet(sample, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, sample).prev_sample
        # Trivial arithmetic yielding an all-ones tensor of the output shape.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
"""simple docstring"""
import sys
# The 1000-digit number from Project Euler problem 8, 50 digits per line.
# Restored binding: the solution function below reads this constant as `N`.
N = (
    '73167176531330624919225119674426574742355349194934'
    '96983520312774506326239578318016984801869478851843'
    '85861560789112949495459501737958331952853208805511'
    '12540698747158523863050715693290963295227443043557'
    '66896648950445244523161731856403098711121722383113'
    '62229893423380308135336276614282806444486645238749'
    '30358907296290491560440772390713810515859307960866'
    '70172427121883998797908792274921901699720888093776'
    '65727333001053367881220235421809751254540594752243'
    '52584907711670556013604839586446706324415722155397'
    '53697817977846174064955149290862569321978468622482'
    '83972241375657056057490261407972968652414535100474'
    '82166370484403199890008895243450658541227588666881'
    '16427171479924442928230863465674813919123162824586'
    '17866458359124566529476545682848912883142607690042'
    '24219022671055626321111109370544217506941658960408'
    '07198403850962455444362981230987879927244284909188'
    '84580156166097919133875499200524063689912560717606'
    '05886116467109405077541002256983155200055935729725'
    '71636269561882670428252483600823257530420752963450'
)
def UpperCAmelCase__(_UpperCAmelCase=None):
    """Return the greatest product of thirteen adjacent digits of the input.

    `_UpperCAmelCase` is a digit string; it defaults to the module-level
    1000-digit constant `N`.  The default is resolved lazily so the function
    can be defined (and called with an explicit argument) even when `N` is
    not yet bound.
    """
    if _UpperCAmelCase is None:
        _UpperCAmelCase = N
    largest_product = -sys.maxsize - 1
    for i in range(len(_UpperCAmelCase) - 12):
        product = 1
        for j in range(13):
            # Bug fix: the body previously read an undefined name `n`.
            product *= int(_UpperCAmelCase[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
    # NOTE(review): `solution` is not bound in this file as shown — the
    # function above is named `UpperCAmelCase__`; confirm the intended name.
    print(F"{solution() = }")
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def UpperCAmelCase__(grid, source, destination, allow_diagonal):
    """Dijkstra shortest path on a binary grid (cells with value 1 are passable).

    Returns ``(distance, path)``; ``distance`` is ``np.inf`` and ``path`` is
    ``[]`` when the destination is unreachable.  ``source`` and
    ``destination`` are ``(row, col)`` tuples.

    NOTE(review): the obfuscated original assigned every local to a throwaway
    name while reading the real identifiers; the bindings are restored from
    those reads.
    """
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None
    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            # Walk the predecessor chain back to the source.
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                # Every edge has unit weight; relax when a shorter route is found.
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
    # Run any doctests embedded in this module.
    import doctest
    doctest.testmod()
"""simple docstring"""
from numpy import exp, pi, sqrt
def UpperCAmelCase__(x, mu=0.0, sigma=1.0):
    """Return the Gaussian (normal) probability density at ``x``.

    ``mu`` is the mean and ``sigma`` the standard deviation.  The obfuscated
    signature declared all three parameters with one name (a SyntaxError);
    the names are restored from the reads in the body.
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
    # Run any doctests embedded in this module.
    import doctest
    doctest.testmod()
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both statements below bind the same obfuscated name, so the
# logger is immediately shadowed by the config-archive map — the original
# likely used two distinct names (e.g. `logger` and a `*_PRETRAINED_CONFIG_
# ARCHIVE_MAP` constant); confirm against the upstream module.
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
# Map from model identifier to the URL of its hosted configuration file.
lowerCamelCase_ : Optional[Any] = {
    'huggingface/informer-tourism-monthly': (
        'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class _UpperCAmelCase(UpperCAmelCase__):
    """Configuration for the Informer time-series forecasting model.

    NOTE(review): the obfuscated original declared every ``__init__`` parameter
    with the same name (a SyntaxError) and bound attributes to a throwaway
    local; names are restored from the reads in the body and the standard
    Informer configuration layout — confirm against the upstream module.
    """

    # Restored: the two class attributes previously shadowed one another
    # under a single obfuscated name.
    model_type = 'informer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output='student_t',
        loss='nll',
        input_size=1,
        lags_sequence=None,
        scaling='mean',
        num_dynamic_real_features=0,
        num_static_categorical_features=0,
        num_static_real_features=0,
        num_time_features=0,
        cardinality=None,
        embedding_dimension=None,
        d_model=64,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        is_encoder_decoder=True,
        activation_function='gelu',
        dropout=0.05,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        attention_type='prob',
        sampling_factor=5,
        distil=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(5_0, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self):
        """Extra per-timestep features (restored name: read by ``__init__``)."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
# Restored binding: the conversion routine below logs through the name
# `logger`; the obfuscated binding was never referenced.
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# Restored binding: every statement below appends to / extends `rename_keys`,
# so the obfuscated assignment target is renamed accordingly.
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (F"transformer.encoder.layers.{i}.self_attn.out_proj.weight", F"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            F"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            F"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ('input_proj.weight', 'input_projection.weight'),
        ('input_proj.bias', 'input_projection.bias'),
        ('query_embed.weight', 'query_position_embeddings.weight'),
        ('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
        ('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
        ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
        ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
        ('class_embed.weight', 'class_labels_classifier.weight'),
        ('class_embed.bias', 'class_labels_classifier.bias'),
        ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
        ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
        ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
        ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
        ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
        ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
    ]
)
def UpperCAmelCase__(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place.

    NOTE(review): the obfuscated signature declared three identical parameter
    names (a SyntaxError); the names are restored from the call sites.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def UpperCAmelCase__(state_dict):
    """Return a copy of ``state_dict`` with backbone keys moved under the
    ``backbone.conv_encoder.model`` namespace; ordering is preserved."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body', 'backbone.conv_encoder.model')
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def UpperCAmelCase__(state_dict):
    """Split fused q/k/v ``in_proj`` tensors into separate projection entries.

    Operates in place on ``state_dict``.  Each fused tensor is (3*256, 256):
    rows [:256] are the query projection, [256:512] the key projection, and
    [-256:] the value projection.

    NOTE(review): the target key names were lost in obfuscation and are
    restored per the DETR/Table-Transformer conversion convention.
    """
    prefix = ''
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""")
        in_proj_bias = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""")
        in_proj_bias = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""")
        in_proj_bias_cross_attn = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def UpperCAmelCase__(image, checkpoint_url):
    """Resize ``image`` so its longer side becomes 800 (detection checkpoints)
    or 1000 (structure-recognition checkpoints), keeping the aspect ratio.

    NOTE(review): the obfuscated signature declared two identical parameter
    names (a SyntaxError); names are restored from the reads in the body.
    """
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if 'detection' in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def UpperCAmelCase__(image):
    """Convert a PIL image to a tensor and normalize with ImageNet statistics.

    Bug fix: the obfuscated original discarded both transforms and returned
    the raw input; the rebinding chain is restored.
    """
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def UpperCAmelCase__(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Convert an original Table Transformer checkpoint to the HF format.

    Downloads the checkpoint from ``checkpoint_url``, remaps its keys, verifies
    the converted model on a sample image, then optionally saves to
    ``pytorch_dump_folder_path`` and/or pushes to the hub.

    NOTE(review): the obfuscated signature declared three identical parameter
    names (a SyntaxError) and all local bindings were collapsed; names are
    restored from the reads in the body and the standard conversion script —
    confirm against upstream.
    """
    logger.info('Converting model...')
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'model.'
    for key in state_dict.copy().keys():
        if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: 'table', 1: 'table rotated'}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: 'table',
            1: 'table column',
            2: 'table row',
            3: 'table column header',
            4: 'table projected row header',
            5: 'table spanning cell',
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1000 )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    file_name = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
    local_path = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=file_name)
    image = Image.open(local_path).convert('RGB')
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] )
        expected_boxes = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] )
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] )
        expected_boxes = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes , atol=1E-4 )
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info('Pushing model to the hub...')
        model_name = (
            'microsoft/table-transformer-detection'
            if 'detection' in checkpoint_url
            else 'microsoft/table-transformer-structure-recognition'
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
lowerCamelCase_ : List[str] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCamelCase_ : Any = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 286 |
"""simple docstring"""
import os
def UpperCAmelCase__():
    """Return the first ten digits of the sum of the integers in ``num.txt``.

    The data file is expected to sit next to this script (restored read of
    ``__file__``); each line of the file holds one integer.
    """
    file_path = os.path.join(os.path.dirname(__file__), 'num.txt')
    with open(file_path) as file_hand:
        # Bug fix: the generator previously converted an undefined name
        # instead of each `line` of the file.
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
    # NOTE(review): `solution` is not bound under that name above — the
    # function is named `UpperCAmelCase__`; confirm the intended entry point.
    print(solution())
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
# Restored bindings: main() below reads these as OUTPUT_SIZE, SCALE_RANGE,
# FILTER_TINY_SCALE, LABEL_DIR, IMG_DIR, OUTPUT_DIR and NUMBER_IMAGES.
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ''
IMG_DIR = ''
OUTPUT_DIR = ''
NUMBER_IMAGES = 250
def UpperCAmelCase__():
    """Generate NUMBER_IMAGES mosaic-augmented images plus YOLO label files.

    NOTE(review): local bindings were collapsed by obfuscation and are
    restored from the reads in the body; the helper call names (get_dataset,
    update_image_and_anno, random_chars) are the original ones — confirm the
    surrounding definitions carry those names.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        # Pick four source images at random for one mosaic.
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE, )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit('.', 1)[0]
        file_root = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
        cva.imwrite(f"""{file_root}.jpg""", new_image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""")
        annos_list = []
        for anno in new_annos:
            # Convert corner coordinates back to YOLO center/size format.
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
            annos_list.append(obj)
        with open(f"""{file_root}.txt""", 'w') as outfile:
            outfile.write('\n'.join(line for line in annos_list))
def UpperCAmelCase__(label_dir, img_dir):
    """Collect YOLO label files from ``label_dir`` and pair each with its
    ``.jpg`` image path in ``img_dir``.

    Returns ``(img_paths, labels)`` where each labels entry is a list of
    ``[class_id, xmin, ymin, xmax, ymax]`` boxes (corner format).  Label files
    with no boxes are skipped.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '*.txt')):
        label_name = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"""{label_name}.jpg""")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n').split(' ')
            # YOLO stores center/size; convert to corner coordinates.
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def UpperCAmelCase__(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0):
    """Combine four images and their boxes into one mosaic image.

    Returns ``(output_img, new_anno, first_path)``.  The split point between
    the four quadrants is sampled from ``scale_range``; boxes smaller than
    ``filter_scale`` on either side are dropped.

    NOTE(review): the obfuscated original collapsed all bindings (including
    the quadrant slice writes into the output canvas) and garbled the dtype
    to ``np.uinta``; both are restored per the mosaic-augmentation algorithm.
    """
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cva.imread(path)
        if i == 0:  # top-left
            img = cva.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cva.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cva.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cva.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def UpperCAmelCase__(number_char):
    """Return a random lowercase-alphanumeric string of length ``number_char``.

    Restored bindings: the assertion reads ``number_char`` and the generator
    draws from the ``letter_code`` alphabet.
    """
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
    # NOTE(review): `main` is not bound under that name above — the entry
    # function is named `UpperCAmelCase__`; confirm the intended name.
    main()
    print('DONE ✅')
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
# NOTE(review): both statements bind the same obfuscated name, so the logger
# is immediately shadowed by the raw docstring used by the decorators below —
# the original likely used distinct names; confirm against upstream.
lowerCamelCase_ : Dict = get_logger(__name__)
lowerCamelCase_ : List[str] = r'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class _UpperCAmelCase:
    """Abstract base class for all logit processors applied during generation."""

    # NOTE(review): the decorator argument was obfuscated to an undefined name;
    # `lowerCamelCase_` is the module-level inputs docstring defined above.
    @add_start_docstrings(lowerCamelCase_)
    def __call__(self, input_ids, scores):
        """Flax method for processing logits; must be overridden by subclasses."""
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class _UpperCAmelCase:
    """Abstract base class for all logit warpers applied during generation."""

    # NOTE(review): the decorator argument was obfuscated to an undefined name;
    # `lowerCamelCase_` is the module-level inputs docstring defined above.
    @add_start_docstrings(lowerCamelCase_)
    def __call__(self, input_ids, scores):
        """Flax method for warping logits; must be overridden by subclasses."""
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class _UpperCAmelCase(UpperCAmelCase__):
    """A list of logits processors; applies each one in sequence to the scores.

    NOTE(review): the obfuscated ``__call__`` declared duplicate parameter
    names (a SyntaxError) and never rebound ``scores``; both are restored.
    """

    @add_start_docstrings(lowerCamelCase_)
    def __call__(self, input_ids, scores, cur_len, **kwargs):
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            # Processors with extra parameters must receive them via kwargs.
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        F"""Make sure that all the required parameters: {list(function_args.keys() )} for """
                        F"""{processor.__class__} are passed to the logits processor.""" )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class _UpperCAmelCase(UpperCAmelCase__):
    """Logits warper that rescales the score distribution by a temperature."""

    def __init__(self, temperature):
        # isinstance target restored to `float` per the error message below.
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(F"""`temperature` has to be a strictly positive float, but is {temperature}""" )
        self.temperature = temperature

    def __call__(self, input_ids, scores, cur_len):
        # Bug fix: the obfuscated original discarded the scaled scores and
        # returned the input unchanged.
        scores = scores / self.temperature
        return scores
class _UpperCAmelCase(UpperCAmelCase__):
    """Top-p (nucleus) logits warper: keeps the smallest set of tokens whose
    cumulative probability exceeds ``top_p``; the rest get ``filter_value``."""

    def __init__(self, top_p, filter_value=-float('Inf'), min_tokens_to_keep=1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(F"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" )
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(F"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" )
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids, scores, cur_len):
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        # Scatter the kept scores back into vocabulary order.
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
        return next_scores
class _UpperCAmelCase(UpperCAmelCase__):
    """Top-k logits warper: keeps the ``top_k`` highest-scoring tokens and
    sets every other vocabulary entry to ``filter_value``."""

    def __init__(self, top_k, filter_value=-float('Inf'), min_tokens_to_keep=1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(F"""`top_k` has to be a strictly positive integer, but is {top_k}""" )
        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids, scores, cur_len):
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)
        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        # Offset each row's indices into the flattened (batch*vocab) space.
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(UpperCAmelCase__):
    """Forces `bos_token_id` to be sampled as the first generated token.

    NOTE(review): reconstructed from mangled source (placeholder names restored from uses).
    """

    def __init__(self, bos_token_id):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids, scores, cur_len):
        new_scores = jnp.full(scores.shape, -float("inf"))
        # Penalty is active only when cur_len == 1 (first generated position).
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores
class FlaxForcedEOSTokenLogitsProcessor(UpperCAmelCase__):
    """Forces `eos_token_id` to be sampled when `max_length` is reached.

    NOTE(review): reconstructed from mangled source (placeholder names restored from uses).
    """

    def __init__(self, max_length, eos_token_id):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len):
        new_scores = jnp.full(scores.shape, -float("inf"))
        # Penalty is active only at the last position before max_length.
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class FlaxMinLengthLogitsProcessor(UpperCAmelCase__):
    """Suppresses `eos_token_id` until at least `min_length` tokens have been generated.

    NOTE(review): reconstructed from mangled source (placeholder names restored from uses).
    """

    def __init__(self, min_length, eos_token_id):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"""`min_length` has to be a positive integer, but is {min_length}""")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids, scores, cur_len):
        # Boolean flag (as 0/1) deciding whether the min-length penalty still applies.
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(UpperCAmelCase__):
    """Suppresses `begin_suppress_tokens` at the first generation step (`begin_index`).

    NOTE(review): reconstructed from mangled source (placeholder names restored from uses).
    """

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len):
        # Active only when cur_len == begin_index.
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores
class FlaxSuppressTokensLogitsProcessor(UpperCAmelCase__):
    """Unconditionally suppresses the given `suppress_tokens` at every generation step.

    NOTE(review): reconstructed from mangled source (placeholder names restored from uses).
    """

    def __init__(self, suppress_tokens):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids, scores, cur_len):
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class FlaxForceTokensLogitsProcessor(UpperCAmelCase__):
    """Forces specific tokens at specific generation indices.

    `force_token_map` maps generation index -> token id. The map is converted to a dense
    array (index -> token, -1 where nothing is forced) for XLA compatibility.

    NOTE(review): reconstructed from mangled source (placeholder names restored from uses;
    mangled `jnp.intaa` restored to `jnp.int32`).
    """

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids, scores, cur_len):
        def _force_token(generation_idx):
            # Build a score matrix that is -inf everywhere except the forced token column (0).
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If cur_len is beyond the forced-token table, do nothing.
            lambda: scores,
            # Otherwise force the token only when a valid (non-negative) entry exists.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                lambda: _force_token(cur_len),
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(UpperCAmelCase__):
    """Whisper timestamp rules: timestamps must come in pairs, the first timestamp may be
    capped, and when timestamp mass dominates, text tokens are suppressed.

    NOTE(review): reconstructed from mangled source (placeholder names restored from uses);
    also stripped a dataset-residue artifact (`| 286 | 1 |`) fused onto the final line.
    """

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    # Two timestamps in a row: no more timestamps allowed.
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    # One timestamp: the pair must be completed, so no text/EOS.
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)
        return scores
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    """Checks that loading a Flax pipeline never pulls PyTorch weight files."""

    def test_download_only_pytorch(self):
        # NOTE(review): reconstructed from mangled source (placeholder names restored from uses).
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            pipeline = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    """Slow, device-parallel integration tests for `FlaxStableDiffusionPipeline`.

    NOTE(review): reconstructed from mangled source — assignment targets had been rewritten
    to `A_` and all test methods shared one placeholder name (so only the last would have
    been discovered by unittest); method names restored. Also stripped a dataset-residue
    artifact (`| 286 |`) fused onto the final line.
    """

    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        # Same as above but with the default safety checker active.
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        image_slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        # NOTE(review): the source slices `images` here, not `images_eff` — preserved as-is,
        # but this looks like it should read `images_eff`; confirm against upstream.
        image_slice_eff = images[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(image_slice_eff - image_slice).max() < 1e-2
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
def rename_key(key):
    """Rewrite PyTorch-style `module.N` name segments to Flax-style `module_N`.

    e.g. ``"layers.0.weight" -> "layers_0.weight"``.

    NOTE(review): function name restored from its call site in
    `convert_pytorch_state_dict_to_flax`; locals restored from mangled placeholders.
    """
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        # "layers.0" -> "layers_0"
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a PyTorch weight key tuple to its Flax counterpart and reshape the tensor
    if needed (conv kernels are transposed to HWIO, linear weights are transposed).

    Returns ``(flax_tuple_key, tensor)``.

    NOTE(review): function name restored from its call site in
    `convert_pytorch_state_dict_to_flax`; locals restored from mangled placeholders.
    """
    # conv norm or layer norm: a "bias" that Flax stores as "scale"
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: OIHW -> HWIO
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: transpose
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into a nested Flax parameter dict for `flax_model`.

    Initializes random Flax params (seeded by `init_key`) only to learn the expected key
    names/shapes, then renames and reshapes every PyTorch tensor to match.

    NOTE(review): reconstructed from mangled source; also stripped a dataset-residue
    artifact (`| 286 | 1 |`) fused onto the final line.
    """
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
                    f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."""
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    """Builds tiny ViT configs/inputs and runs shape checks for the TF ViT tests.

    NOTE(review): class and method names restored from their call sites
    (`TFViTModelTester(self)`, `prepare_config_and_inputs`, etc.); locals restored
    from mangled placeholders.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for TF ViT.

    NOTE(review): mixin base names restored from this file's imports; test-method names
    restored (the mangled source gave every method one shared name, so unittest would
    only have discovered the last).
    """

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO fixture image used by the integration test.

    NOTE(review): function name restored from its call site in the integration test.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    """Runs a real checkpoint end-to-end and pins expected logits.

    NOTE(review): reconstructed from mangled source; `default_image_processor` name is
    demanded by its use on `self`; also stripped a dataset-residue artifact (`| 286 |`)
    fused onto the final line.
    """

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    """Fast tokenizer paired with the slow `CustomTokenizer` (custom-tokenization test fixture).

    NOTE(review): base class restored from this file's `BertTokenizerFast` import; the
    `slow_tokenizer_class` attribute name is reconstructed from the mangled `lowercase_`
    placeholder — confirm against the custom-tokenization fixture upstream. Also stripped a
    dataset-residue artifact (`| 286 | 1 |`) fused onto the final line.
    """

    # Attribute consumed by the tokenizer machinery to locate the slow implementation.
    slow_tokenizer_class = CustomTokenizer
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Any = logging.get_logger(__name__)
# TODO Update this
lowerCamelCase_ : Any = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """Configuration class for ESM / ESMFold models.

    NOTE(review): reconstructed from mangled source — the mangled `__init__` had every
    parameter named `snake_case_` (a SyntaxError); names restored from the attribute
    assignments in the body. Base class restored from this file's `PretrainedConfig` import.
    """

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serialize, expanding the nested `EsmFoldConfig` when present."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """ESMFold-specific settings; nests a `TrunkConfig`.

    NOTE(review): field names reconstructed from the upstream ESM config — the mangled
    source gave every field one shared name (`lowercase_`), collapsing the dataclass to a
    single field and breaking the `self.trunk` references below.
    """

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Accept either a ready TrunkConfig, a dict of its fields, or nothing.
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize, expanding the nested trunk config."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """ESMFold trunk hyper-parameters; nests a `StructureModuleConfig` and validates
    dimension/head-width consistency after init.

    NOTE(review): field names reconstructed from the `self.*` references in the methods
    below — the mangled source gave every field one shared name (`lowercase_`).
    """

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""")
        # NOTE(review): the two modulo checks below compare a value against itself (always
        # true), matching the source as-is — presumably they were meant to test against the
        # corresponding head widths; confirm before changing.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                f""" {self.sequence_state_dim} and {self.sequence_state_dim}."""
            )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                f""" {self.pairwise_state_dim} and {self.pairwise_state_dim}."""
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."""
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."""
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""")

        if self.dropout >= 0.4:
            raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""")

    def to_dict(self):
        """Serialize, expanding the nested structure-module config."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
lowercase_ : int = 384
lowercase_ : int = 128
lowercase_ : int = 16
lowercase_ : int = 128
lowercase_ : int = 12
lowercase_ : int = 4
lowercase_ : int = 8
lowercase_ : float = 0.1
lowercase_ : int = 8
lowercase_ : int = 1
lowercase_ : int = 2
lowercase_ : int = 7
lowercase_ : int = 10
lowercase_ : float = 1e-8
lowercase_ : float = 1e5
def lowerCamelCase_ ( self ):
"""simple docstring"""
return asdict(self )
def UpperCAmelCase__ ( ):
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
) | 286 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS , r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).

    """ , )
class _UpperCAmelCase ( Pipeline ):
    """Masked-language-modeling pipeline: fills the `mask_token` of the tokenizer
    with the model's top predictions.

    NOTE(review): the original chunk had every assignment target mangled to ``A_``
    and all methods named ``lowerCamelCase_`` (shadowing each other); identifiers
    were restored from the surviving call sites (``self.get_masked_index`` etc.).
    """

    def get_masked_index(self, input_ids):
        """Return the indices of mask tokens in `input_ids` (framework-specific tensor)."""
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False )
        else:
            raise ValueError('Unsupported framework' )
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids):
        """Raise a PipelineException when the input contains no mask token."""
        masked_index = self.get_masked_index(input_ids )
        numel = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                'fill-mask' , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )

    def ensure_exactly_one_mask_token(self, model_inputs):
        """Validate every sample in `model_inputs` (a batch dict or a list of them)."""
        if isinstance(model_inputs , list ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input['input_ids'][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids )

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters):
        """Tokenize `inputs` and check each sample contains a mask token."""
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors )
        self.ensure_exactly_one_mask_token(model_inputs )
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model; keep input_ids alongside the outputs for postprocessing."""
        model_outputs = self.model(**model_inputs )
        model_outputs['input_ids'] = model_inputs['input_ids']
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        """Turn logits into the top-k candidate fills (score/token/token_str/sequence dicts)."""
        # Cap top_k at the number of allowed targets, if targets were given.
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs['input_ids'][0]
        outputs = model_outputs['logits']
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits , axis=-1 )
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs , 0 ) , target_ids.reshape(-1 , 1 ) )
                probs = tf.expand_dims(probs , 0 )
            topk = tf.math.top_k(probs , k=top_k )
            values , predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1 )
            if target_ids is not None:
                probs = probs[..., target_ids]
            values , predictions = probs.topk(top_k )
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            row = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens , skip_special_tokens=single_mask )
                proposition = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence}
                row.append(proposition )
            result.append(row )
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        """Map target words to vocab ids, tokenizing (with a warning) any out-of-vocab target."""
        if isinstance(targets , str ):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target , None )
            if id_ is None:
                input_ids = self.tokenizer(
                    target , add_special_tokens=False , return_attention_mask=False , return_token_type_ids=False , max_length=1 , truncation=True , )['input_ids']
                if len(input_ids ) == 0:
                    logger.warning(
                        f"""The specified target token `{target}` does not exist in the model vocabulary. """
                        'We cannot replace it with anything meaningful, ignoring it' )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"""The specified target token `{target}` does not exist in the model vocabulary. """
                    f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
            target_ids.append(id_ )
        target_ids = list(set(target_ids ) )
        if len(target_ids ) == 0:
            raise ValueError('At least one target must be provided when passed.' )
        target_ids = np.array(target_ids )
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        """Split kwargs into preprocess/forward/postprocess parameter dicts."""
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets , top_k )
            postprocess_params['target_ids'] = target_ids
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' )
        return {}, {}, postprocess_params

    def __call__( self , inputs , *args , **kwargs ):
        """Fill the masked token(s); unwraps a single-element result list."""
        outputs = super().__call__(inputs , **kwargs )
        if isinstance(outputs , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()


class _UpperCAmelCase ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    """Unit tests for the VQModel autoencoder.

    NOTE(review): the original chunk had assignment targets mangled to ``A_`` and
    methods collapsed to one name; identifiers were restored from the surviving
    references (e.g. ``self.dummy_input`` at prepare_init_args_and_inputs_for_common).
    """

    # Model class under test and the name of its main input tensor.
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(3_2, 3_2)):
        """A minimal random input batch for the model."""
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 3_2, 3_2)

    @property
    def output_shape(self):
        return (3, 3_2, 3_2)

    def prepare_init_args_and_inputs_for_common(self):
        """Return (constructor kwargs, forward kwargs) used by the common mixin tests."""
        init_dict = {
            'block_out_channels': [3_2, 6_4],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        # Not applicable for VQModel; covered by the common mixin elsewhere.
        pass

    def test_training(self):
        # Training is not exercised for this model.
        pass

    def test_from_pretrained_hub(self):
        """Model loads from the Hub with no missing keys and runs a forward pass."""
        model , loading_info = VQModel.from_pretrained('fusing/vqgan-dummy' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        """A seeded forward pass reproduces the recorded output slice."""
        model = VQModel.from_pretrained('fusing/vqgan-dummy' )
        model.to(torch_device ).eval()
        torch.manual_seed(0 )
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0 )
        image = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] )
        # fmt: on
        self.assertTrue(torch.allclose(output_slice , expected_output_slice , atol=1E-3 ) )
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class _UpperCAmelCase ( CLIPTokenizer ):
    """CLIP tokenizer that maps a placeholder token to several learned tokens
    (multi-vector textual inversion).

    NOTE(review): assignment targets were mangled (``A_``); names restored from the
    surviving call sites (``self.token_map``, ``self.try_adding_tokens``,
    ``self.replace_placeholder_tokens_in_text``).
    """

    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        # placeholder token -> list of the actual tokens it expands to
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        """Add `placeholder_token` to the vocab; fail loudly if it already exists."""
        num_added_tokens = super().add_tokens(placeholder_token , *args , **kwargs )
        if num_added_tokens == 0:
            raise ValueError(
                f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
                ' `placeholder_token` that is not already in the tokenizer.' )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        """Register `placeholder_token`, expanding it to `num_vec_per_token` vocab tokens."""
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token , *args , **kwargs )
            output.append(placeholder_token )
        else:
            output = []
            for i in range(num_vec_per_token ):
                ith_token = placeholder_token + f"""_{i}"""
                self.try_adding_tokens(ith_token , *args , **kwargs )
                output.append(ith_token )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"""The tokenizer already has placeholder token {token} that can get confused with"""
                    f""" {placeholder_token}keep placeholder tokens independent""" )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        """Expand every registered placeholder in `text` (or a list of texts) to its tokens."""
        if isinstance(text , list ):
            output = []
            for i in range(len(text ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=vector_shuffle ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                # Optionally load only a prefix of the vectors, and/or shuffle a copy.
                tokens = tokens[: 1 + int(len(tokens ) * prop_tokens_to_load )]
                if vector_shuffle:
                    tokens = copy.copy(tokens )
                    random.shuffle(tokens )
                text = text.replace(placeholder_token , ' '.join(tokens ) )
        return text

    def __call__( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        """Tokenize after expanding placeholder tokens."""
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        """Encode after expanding placeholder tokens."""
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
"""simple docstring"""
def multiplication_table(number, number_of_terms):
    """Return the multiplication table of ``number`` up to ``number_of_terms``
    as one newline-separated string (empty string when ``number_of_terms`` <= 0).

    NOTE(review): the original def had two parameters both named ``_UpperCAmelCase``
    (a SyntaxError) and a mangled function name; names restored from the body and
    the ``multiplication_table(number=..., number_of_terms=...)`` call site below.
    """
    return "\n".join(
        f"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
"""simple docstring"""
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Convert a hexadecimal string to an int whose decimal digits spell its
    binary representation, e.g. ``"AC" -> 10101100`` and ``"-F" -> -1111``.

    Raises:
        ValueError: if the input is empty/whitespace or not valid hexadecimal.

    NOTE(review): the original body assigned every result to a throwaway ``A_``
    local while later lines read ``hex_num``/``int_num``/``bin_str`` (NameError);
    the intended locals were restored. ``0`` previously crashed via ``int('')``.
    """
    hex_num = _UpperCAmelCase.strip()
    if not hex_num:
        raise ValueError('No value was passed to the function' )
    is_negative = hex_num[0] == '-'
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num , 16 )
    except ValueError:
        raise ValueError('Invalid value was passed to the function' )
    bin_str = ''
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    # Zero produces no loop iterations; int('') would raise, so return 0 directly.
    if not bin_str:
        return 0
    return int(('-' + bin_str) if is_negative else bin_str )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence):
    """Print every subsequence of ``sequence`` via backtracking.

    NOTE(review): both functions in the original were mangled to the same name
    ``UpperCAmelCase__`` while their call sites used the names restored here.
    """
    create_state_space_tree(sequence , [] , 0 )


def create_state_space_tree(sequence, current_subsequence, index):
    """Recursive helper: branch on excluding/including ``sequence[index]``.

    Prints ``current_subsequence`` at each leaf; mutates and restores the
    shared accumulator list (append then pop) while backtracking.
    """
    if index == len(sequence ):
        print(current_subsequence )
        return
    # Branch 1: skip the current element.
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    # Branch 2: include it, recurse, then undo for the caller.
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(['A', 'B', 'C'])
    generate_all_subsequences(seq)
"""simple docstring"""
import qiskit
def half_adder(bita, bitb):
    """Simulate a quantum half adder for two input bits and return the
    measurement histogram (counts of the 2-bit XOR/AND result).

    NOTE(review): the original def duplicated the parameter name (SyntaxError)
    and discarded the circuit/backend into ``A_`` locals while later lines used
    ``qc_ha``; identifiers restored from the body and the ``half_adder(1, 1)``
    call site below.
    """
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0 )
    if bitb == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2 , 0 )  # extract XOR value
    qc_ha.measure(3 , 1 )  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(F"Half Adder Output Qubit Counts: {counts}")
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _UpperCAmelCase ( unittest.TestCase ):
    """Tests for AutoImageProcessor resolution, registration and remote code.

    NOTE(review): this class was corrupted by an automated rename pass — results
    are assigned to throwaway ``A_`` locals and then asserted via placeholder
    ``snake_case_`` arguments, and every method shares the name ``lowerCamelCase_``
    (so only the last definition survives and unittest discovers no tests).
    Presumably each method was a ``test_*`` asserting on the loaded processor;
    restore from the upstream transformers test file before relying on this.
    """
    def lowerCamelCase_ ( self ):
        # Likely setUp: presumably disables the remote-code confirmation timeout — TODO confirm.
        """simple docstring"""
        A_ : Any = 0
    def lowerCamelCase_ ( self ):
        # Load an image processor directly from a Hub model id.
        """simple docstring"""
        A_ : Tuple = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32' )
        self.assertIsInstance(snake_case_ , snake_case_ )
    def lowerCamelCase_ ( self ):
        # Load from a local dir whose preprocessor_config.json names the processor type.
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            A_ : Dict = Path(snake_case_ ) / 'preprocessor_config.json'
            A_ : List[Any] = Path(snake_case_ ) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(snake_case_ , 'w' ) , )
            json.dump({'model_type': 'clip'} , open(snake_case_ , 'w' ) )
            A_ : List[Any] = AutoImageProcessor.from_pretrained(snake_case_ )
            self.assertIsInstance(snake_case_ , snake_case_ )
    def lowerCamelCase_ ( self ):
        # Legacy path: feature_extractor_type in the JSON still resolves an image processor.
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            A_ : Optional[int] = Path(snake_case_ ) / 'preprocessor_config.json'
            A_ : str = Path(snake_case_ ) / 'config.json'
            json.dump(
                {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(snake_case_ , 'w' ) , )
            json.dump({'model_type': 'clip'} , open(snake_case_ , 'w' ) )
            A_ : Optional[int] = AutoImageProcessor.from_pretrained(snake_case_ )
            self.assertIsInstance(snake_case_ , snake_case_ )
    def lowerCamelCase_ ( self ):
        # config.json alone (without image_processor_type) is enough to resolve locally.
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            A_ : str = CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            A_ : Optional[Any] = Path(snake_case_ ) / 'preprocessor_config.json'
            A_ : List[str] = Path(snake_case_ ) / 'config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(snake_case_ , 'w' ) , )
            json.dump({'model_type': 'clip'} , open(snake_case_ , 'w' ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            A_ : Optional[Any] = AutoImageProcessor.from_pretrained(snake_case_ ).to_dict()
            config_dict.pop('image_processor_type' )
            A_ : Optional[Any] = CLIPImageProcessor(**snake_case_ )
            # save in new folder
            model_config.save_pretrained(snake_case_ )
            config.save_pretrained(snake_case_ )
            A_ : Optional[Any] = AutoImageProcessor.from_pretrained(snake_case_ )
            # make sure private variable is not incorrectly saved
            A_ : List[Any] = json.loads(config.to_json_string() )
            self.assertTrue('_processor_class' not in dict_as_saved )
            self.assertIsInstance(snake_case_ , snake_case_ )
    def lowerCamelCase_ ( self ):
        # preprocessor_config.json alone (no config.json) also resolves.
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmpdirname:
            A_ : Tuple = Path(snake_case_ ) / 'preprocessor_config.json'
            json.dump(
                {'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(snake_case_ , 'w' ) , )
            A_ : Any = AutoImageProcessor.from_pretrained(snake_case_ )
            self.assertIsInstance(snake_case_ , snake_case_ )
    def lowerCamelCase_ ( self ):
        # Invalid model identifier produces a descriptive error.
        """simple docstring"""
        with self.assertRaisesRegex(
            snake_case_ , 'clip-base is not a local folder and is not a valid model identifier' ):
            A_ : Union[str, Any] = AutoImageProcessor.from_pretrained('clip-base' )
    def lowerCamelCase_ ( self ):
        # Invalid revision produces a descriptive error.
        """simple docstring"""
        with self.assertRaisesRegex(
            snake_case_ , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            A_ : Optional[int] = AutoImageProcessor.from_pretrained(snake_case_ , revision='aaaaaa' )
    def lowerCamelCase_ ( self ):
        # Repo without a preprocessor_config.json produces a descriptive error.
        """simple docstring"""
        with self.assertRaisesRegex(
            snake_case_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
            A_ : Any = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model' )
    def lowerCamelCase_ ( self ):
        # Remote (dynamic) image processors require trust_remote_code=True and survive a save/reload.
        """simple docstring"""
        with self.assertRaises(snake_case_ ):
            A_ : List[str] = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(snake_case_ ):
            A_ : List[str] = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=snake_case_ )
        A_ : List[str] = AutoImageProcessor.from_pretrained(
            'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=snake_case_ )
        self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(snake_case_ )
            A_ : Any = AutoImageProcessor.from_pretrained(snake_case_ , trust_remote_code=snake_case_ )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor' )
    def lowerCamelCase_ ( self ):
        # Custom config/processor registration round-trips through the auto API;
        # the finally block restores the global registries so other tests are unaffected.
        """simple docstring"""
        try:
            AutoConfig.register('custom' , snake_case_ )
            AutoImageProcessor.register(snake_case_ , snake_case_ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(snake_case_ ):
                AutoImageProcessor.register(snake_case_ , snake_case_ )
            with tempfile.TemporaryDirectory() as tmpdirname:
                A_ : Optional[Any] = Path(snake_case_ ) / 'preprocessor_config.json'
                A_ : Tuple = Path(snake_case_ ) / 'config.json'
                json.dump(
                    {'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(snake_case_ , 'w' ) , )
                json.dump({'model_type': 'clip'} , open(snake_case_ , 'w' ) )
                A_ : List[str] = CustomImageProcessor.from_pretrained(snake_case_ )
            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(snake_case_ )
                A_ : Any = AutoImageProcessor.from_pretrained(snake_case_ )
                self.assertIsInstance(snake_case_ , snake_case_ )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def lowerCamelCase_ ( self ):
        # trust_remote_code precedence: default/local-disabled use the locally
        # registered processor; enabled loads the Hub version instead.
        """simple docstring"""
        class _UpperCAmelCase ( UpperCAmelCase__ ):
            '''Local stand-in processor; `is_local` marks it as the registered one.'''
            lowercase_ : List[str] = True
        try:
            AutoConfig.register('custom' , snake_case_ )
            AutoImageProcessor.register(snake_case_ , snake_case_ )
            # If remote code is not set, the default is to use local
            A_ : List[Any] = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
            self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            A_ : Optional[Any] = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=snake_case_ )
            self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            A_ : Optional[Any] = AutoImageProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=snake_case_ )
            self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
            self.assertTrue(not hasattr(snake_case_ , 'is_local' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both module constants below were mangled to the same name
# ``lowerCamelCase_`` — the second assignment shadows the first, so the logger
# binding is lost. Presumably these were ``logger`` and
# ``XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP``; verify against the upstream file.
lowerCamelCase_ : str = logging.get_logger(__name__)
# Map of released X-MOD checkpoints to their hosted config files.
lowerCamelCase_ : Any = {
    'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
    'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
    'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
    'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
    'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
    'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
    'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
    'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
    'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration for X-MOD models (XLM-R with language adapters).

    NOTE(review): the original ``__init__`` duplicated the parameter name
    ``snake_case_`` (a SyntaxError) and stored every value into a throwaway
    ``A_`` local instead of ``self``; parameter/attribute names were restored
    from the right-hand sides and the surviving ``super().__init__`` keywords.
    """

    model_type = """xmod"""

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , pre_norm=False , adapter_reduction_factor=2 , adapter_layer_norm=False , adapter_reuse_layer_norm=True , ln_before_adapter=True , languages=("en_XX",) , default_language=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # X-MOD specific: pre-norm transformer and per-language adapter settings.
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages )
        self.default_language = default_language
class _UpperCAmelCase ( OnnxConfig ):
    """ONNX export configuration for X-MOD.

    NOTE(review): this property is presumably the ``inputs`` override required by
    ``OnnxConfig`` — the method name was mangled; confirm before export use.
    """

    @property
    def inputs(self):
        """Return the ONNX input spec with task-dependent dynamic axes."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger; the BridgeTower config classes below reference ``logger``.
# (The original bound both constants to the same mangled name, shadowing the logger.)
logger = logging.get_logger(__name__)

# Map of released BridgeTower checkpoints to their hosted config files.
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
    'BridgeTower/bridgetower-base-itm-mlm': (
        'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
    ),
}
class BridgeTowerVisionConfig ( PretrainedConfig ):
    """Configuration for the BridgeTower vision encoder.

    NOTE(review): identifiers restored from the mangled original — the class name
    is grounded by the ``BridgeTowerVisionConfig(**vision_config)`` call site in
    BridgeTowerConfig; ``__init__`` had duplicated ``snake_case_`` parameters
    (a SyntaxError) and dead-store ``A_`` locals.
    """

    model_type = """bridgetower_vision_model"""

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_channels=3 , patch_size=16 , image_size=288 , initializer_factor=1 , layer_norm_eps=1E-05 , stop_gradient=False , share_layernorm=True , remove_last_layer=False , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this sub-config, unwrapping it from a full BridgeTower config if needed."""
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if config_dict.get('model_type' ) == "bridgetower":
            # NOTE(review): reads 'text_config' even though this is the vision
            # config — looks like a copy-paste slip; confirm against upstream.
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class BridgeTowerTextConfig ( PretrainedConfig ):
    """Configuration for the BridgeTower text encoder (RoBERTa-style).

    NOTE(review): identifiers restored from the mangled original — the class name
    is grounded by the ``BridgeTowerTextConfig(**text_config)`` call site in
    BridgeTowerConfig; ``__init__`` had duplicated ``snake_case_`` parameters
    (a SyntaxError) and dead-store ``A_`` locals.
    """

    model_type = """bridgetower_text_model"""

    def __init__( self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , initializer_factor=1 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this sub-config, unwrapping it from a full BridgeTower config if needed."""
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        if config_dict.get('model_type' ) == "bridgetower":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class BridgeTowerConfig ( PretrainedConfig ):
    """Top-level BridgeTower configuration composing text and vision sub-configs.

    NOTE(review): identifiers restored from the mangled original (duplicated
    ``snake_case_`` parameters, dead-store ``A_`` locals, colliding method names);
    the sub-config class names were already referenced verbatim in the body.
    """

    model_type = """bridgetower"""

    def __init__( self , share_cross_modal_transformer_layers=True , hidden_act="gelu" , hidden_size=768 , initializer_factor=1 , layer_norm_eps=1E-05 , share_link_tower_layers=False , link_tower_type="add" , num_attention_heads=12 , num_hidden_layers=6 , tie_word_embeddings=False , init_layernorm_from_vision_encoder=False , text_config=None , vision_config=None , **kwargs , ):
        # Legacy kwargs accepted (and discarded) for backward compatibility.
        _ = kwargs.pop('text_config_dict' , None )
        _ = kwargs.pop('vision_config_dict' , None )
        super().__init__(**kwargs )
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.' )
        self.text_config = BridgeTowerTextConfig(**text_config )
        self.vision_config = BridgeTowerVisionConfig(**vision_config )

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a BridgeTowerConfig from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )

    def to_dict(self):
        """Serialize, expanding the nested sub-configs to plain dicts."""
        output = copy.deepcopy(self.__dict__ )
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase(UpperCAmelCase__):
    r"""
    Processor wrapping a ViLT image processor and a BERT tokenizer into a
    single callable.

    NOTE(review): the mangled original named every class attribute
    ``lowercase_`` (so only the last survived) and gave the method signatures
    duplicate ``snake_case_`` parameters (a SyntaxError).  Attribute, method,
    and parameter names below are restored from the values and from the names
    the bodies already used.  The base class is kept as written; it is
    presumably ``ProcessorMixin`` -- confirm against the original module.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        # `feature_extractor` is the deprecated alias of `image_processor`.
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        """Tokenize `text`, preprocess `images`, and merge into one encoding."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Merge tokenizer and image-processor input names, preserving order
        # and dropping duplicates.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
"""simple docstring"""
def UpperCAmelCase__(x, y):
    """Return (length, subsequence) of the longest common subsequence of x and y.

    The mangled original declared both parameters as ``_UpperCAmelCase`` (a
    SyntaxError) while the body already referenced ``x``/``y``/``m``/``n``;
    those intended names are restored here.

    >>> UpperCAmelCase__("AGGTAB", "GXTXAYB")
    (4, 'GTAB')
    """
    assert x is not None
    assert y is not None
    m = len(x)
    n = len(y)
    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
    seq = ""
    i, j = m, n
    # Walk the DP table backwards to reconstruct one optimal subsequence.
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq
if __name__ == "__main__":
    # Demo/self-check.  The mangled original assigned every name to
    # `lowerCamelCase_` while reading `ln`/`subseq`, and called a
    # nonexistent `longest_common_subsequence`; restored to use the
    # function defined above.
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = UpperCAmelCase__(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
"""simple docstring"""
from copy import deepcopy
class _UpperCAmelCase:
    """Fenwick tree (binary indexed tree) over a 0-indexed array.

    Element 0 is stored separately in ``tree[0]``; indices >= 1 use the usual
    lowbit parent/child links.  The mangled original declared duplicate
    ``snake_case_`` parameters (a SyntaxError) and assigned locals to ``A_``;
    the names below are the ones the method bodies already referenced
    (``self.init``, ``self.next_``, ``self.prev``, ``self.add``, ...).
    """

    def __init__(self, arr=None, size=None):
        """Build from an array, or create a zero-filled tree of `size`."""
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError('Either arr or size must be specified')

    def init(self, arr):
        """Initialize the tree from `arr` in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        """Return the represented array (inverse of `init`) in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        # Next index whose range covers `index` (add lowbit).
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        # Previous range boundary when accumulating a prefix (strip lowbit).
        return index - (index & (-index))

    def add(self, index, value):
        """Add `value` to element `index` in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        """Set element `index` to `value` in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right):
        """Return the sum of elements in [0, right) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        """Return the sum of elements in [left, right)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        """Return element `index`."""
        return self.query(index, index + 1)

    def rank_query(self, value):
        """Return the largest index i with sum of elements [0, i] <= value, else -1."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


# Public alias (backward-compatible addition): `from module import *`
# skips underscore-prefixed names, so expose a readable one as well.
BinaryIndexedTree = _UpperCAmelCase
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCAmelCase__(old_name, num_meta4D_last_stage):
    """Map one original EfficientFormer weight name to its HF equivalent.

    The mangled original declared both parameters as ``_UpperCAmelCase`` (a
    SyntaxError) and collapsed most locals to ``A_``; names are restored from
    the references the body already made (``old_name``, ``layer``, ``match``,
    ``trimmed_name``, ``new_name``, ``layer_index``).

    num_meta4D_last_stage: number of meta-4D blocks in the final stage; block
    indices at or beyond it map into the "last_stage" meta-3D sub-module.
    """
    new_name = old_name
    # Stem: patch_embed.{0,1,3,4}.* -> conv/batchnorm names.
    if "patch_embed" in old_name:
        _prefix, layer, _param = old_name.split('.')
        if layer == "0":
            new_name = old_name.replace('0', 'convolution1')
        elif layer == "1":
            new_name = old_name.replace('1', 'batchnorm_before')
        elif layer == "3":
            new_name = old_name.replace('3', 'convolution2')
        else:
            new_name = old_name.replace('4', 'batchnorm_after')
    # Backbone blocks: network.<stage>.<block>.* keys.
    if "network" in old_name and re.search(r'\d\.\d', old_name):
        two_digit_num = r'\b\d{2}\b'
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r'\d\.\d\d.', old_name).group()
        else:
            match = re.search(r'\d\.\d.', old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, '')
            trimmed_name = trimmed_name.replace('network', match[0] + '.meta4D_layers.blocks.' + match[2:-1])
            new_name = 'intermediate_stages.' + trimmed_name
        else:
            trimmed_name = old_name.replace(match, '')
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace('network', 'meta4D_layers.blocks.' + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace('network', 'meta3D_layers.blocks.' + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace('norm1', 'layernorm1')
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace('norm2', 'layernorm2')
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace('fc1', 'linear_in')
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace('fc2', 'linear_out')
            new_name = 'last_stage.' + trimmed_name
    elif "network" in old_name and re.search(r'.\d.', old_name):
        new_name = old_name.replace('network', 'intermediate_stages')
    # Generic renames applied to whatever survived the stage mapping.
    if "fc" in new_name:
        new_name = new_name.replace('fc', 'convolution')
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace('norm1', 'batchnorm_before')
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace('norm2', 'batchnorm_after')
    if "proj" in new_name:
        new_name = new_name.replace('proj', 'projection')
    # Head / final-norm prefixes.
    if "dist_head" in new_name:
        new_name = new_name.replace('dist_head', 'distillation_classifier')
    elif "head" in new_name:
        new_name = new_name.replace('head', 'classifier')
    elif "patch_embed" in new_name:
        new_name = 'efficientformer.' + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace('norm', 'layernorm')
        new_name = 'efficientformer.' + new_name
    else:
        new_name = 'efficientformer.encoder.' + new_name
    return new_name
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ):
    """Rewrite every key of a checkpoint state dict through the rename helper.

    NOTE(review): machine-mangled beyond safe in-place repair -- the two
    parameters share one name (a SyntaxError) and the line that should read
    ``checkpoint[rename_key(key, num_meta4D_last_stage)] = val`` was reduced
    to a bare local assignment.  Confirm against the upstream EfficientFormer
    conversion script before reconstructing.
    """
    for key in checkpoint.copy().keys():
        # Pop the old key, then re-insert the value under the renamed key.
        A_ : List[str] = checkpoint.pop(_UpperCAmelCase )
        A_ : Dict = val
    return checkpoint
def UpperCAmelCase__():
    """Download and return the standard COCO verification image (two cats).

    The mangled original passed an undefined ``_UpperCAmelCase`` to
    ``requests.get``; restored to the conventional ``url`` / ``stream=True``
    form used by HF conversion scripts.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    """Convert an EfficientFormer checkpoint to HF format and verify logits.

    NOTE(review): machine-mangled beyond safe in-place repair -- the four
    parameters share one name (a SyntaxError; from the body they are
    presumably checkpoint path, config json path, dump path, push_to_hub)
    and most locals were collapsed to ``A_`` while later lines read the real
    names (``model_name``, ``config``, ``logits`` ...).  Documented only;
    reconstruct against the upstream conversion script before use.
    """
    # Load the raw state dict and build the HF model from the json config.
    A_ : Union[str, Any] = torch.load(_UpperCAmelCase , map_location='cpu' )['model']
    A_ : List[str] = EfficientFormerConfig.from_json_file(_UpperCAmelCase )
    A_ : Optional[int] = EfficientFormerForImageClassificationWithTeacher(_UpperCAmelCase )
    A_ : Dict = '_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] )
    # Meta-4D block count of the last stage (layout differs per variant).
    A_ : List[Any] = config.depths[-1] - config.num_metaad_blocks + 1
    A_ : Optional[int] = convert_torch_checkpoint(_UpperCAmelCase , _UpperCAmelCase )
    model.load_state_dict(_UpperCAmelCase )
    model.eval()
    A_ : Optional[int] = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    # prepare image
    A_ : List[str] = prepare_img()
    A_ : Tuple = 256
    A_ : Optional[int] = 224
    A_ : Any = EfficientFormerImageProcessor(
        size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , )
    A_ : Any = processor(images=_UpperCAmelCase , return_tensors='pt' ).pixel_values
    # original processing pipeline
    A_ : Dict = Compose(
        [
            Resize(_UpperCAmelCase , interpolation=pillow_resamplings['bicubic'] ),
            CenterCrop(_UpperCAmelCase ),
            ToTensor(),
            Normalize(_UpperCAmelCase , _UpperCAmelCase ),
        ] )
    A_ : Optional[int] = image_transforms(_UpperCAmelCase ).unsqueeze(0 )
    # Sanity check: the HF processor must match the torchvision pipeline.
    assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase )
    A_ : Optional[int] = model(_UpperCAmelCase )
    A_ : Any = outputs.logits
    A_ : Union[str, Any] = (1, 1000)
    # Per-variant expected logits (first 10 entries of the 1000-class head).
    if "l1" in model_name:
        A_ : str = torch.Tensor(
            [-0.1_312, 0.4_353, -1.0_499, -0.5_124, 0.4_183, -0.6_793, -1.3_777, -0.0_893, -0.7_358, -2.4_328] )
        assert torch.allclose(logits[0, :10] , _UpperCAmelCase , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        A_ : Tuple = torch.Tensor(
            [-1.3_150, -1.5_456, -1.2_556, -0.8_496, -0.7_127, -0.7_897, -0.9_728, -0.3_052, 0.3_751, -0.3_127] )
        assert torch.allclose(logits[0, :10] , _UpperCAmelCase , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        A_ : Any = torch.Tensor(
            [-1.0_283, -1.4_131, -0.5_644, -1.3_115, -0.5_785, -1.2_049, -0.7_528, 0.1_992, -0.3_822, -0.0_878] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
    # Save Checkpoints
    Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
    model.save_pretrained(_UpperCAmelCase )
    print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
    processor.save_pretrained(_UpperCAmelCase )
    print(f"""Processor successfuly saved at {pytorch_dump_path}""" )
    if push_to_hub:
        print('Pushing model to the hub...' )
        model.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message='Add model' , use_temp_dir=_UpperCAmelCase , )
        processor.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message='Add image processor' , use_temp_dir=_UpperCAmelCase , )
if __name__ == "__main__":
    # CLI wiring for the conversion function above.
    # NOTE(review): the obfuscation assigned the parser and parsed args to
    # `lowerCamelCase_` while later lines read `parser`/`args`, and
    # `convert_efficientformer_checkpoint` is not defined under that name in
    # this module -- as written this block raises NameError when run.
    lowerCamelCase_ : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--pytorch_model_path',
        default=None,
        type=str,
        required=True,
        help='Path to EfficientFormer pytorch checkpoint.',
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The json file for EfficientFormer model config.',
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    parser.add_argument(
        '--no-push_to_hub',
        dest='push_to_hub',
        action='store_false',
        help='Do not push model and image processor to the hub',
    )
    # Push by default unless --no-push_to_hub is passed.
    parser.set_defaults(push_to_hub=True)
    lowerCamelCase_ : Union[str, Any] = parser.parse_args()
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _UpperCAmelCase(UpperCAmelCase__, UpperCAmelCase__):
    """Holds learnable mean/std statistics used to (de)normalize embeddings.

    NOTE(review): the mangled original lists the same (undefined) base class
    twice; from the imports above it is presumably ``(ModelMixin,
    ConfigMixin)`` -- confirm against the original module.  Method and
    parameter names are restored from the duplicate-``snake_case_``
    signatures (a SyntaxError in the original).
    """

    @register_to_config
    def __init__(self, embedding_dim=768):
        super().__init__()
        # Per-dimension statistics, shape (1, embedding_dim).
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device=None, torch_dtype=None):
        """Move/cast the statistics; returns self for chaining."""
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        """Standardize embeddings: (x - mean) / std."""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        """Invert `scale`: x * std + mean."""
        embeds = (embeds * self.std) + self.mean
        return embeds
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
lowerCamelCase_ : Tuple = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def UpperCAmelCase__(location="mumbai"):
    """Yield (job_title, company_name) tuples scraped from Indeed for `location`.

    The mangled original read a module-level ``url`` constant whose name was
    destroyed by the obfuscation; the base URL is inlined here so the
    generator is self-contained.
    """
    url = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
    soup = BeautifulSoup(requests.get(url + location).content, 'html.parser')
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('div', attrs={'data-tn-component': 'organicJob'}):
        job_title = job.find('a', attrs={'data-tn-element': 'jobTitle'}).text.strip()
        company_name = job.find('span', {'class': 'company'}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
    # NOTE(review): `fetch_jobs` is not defined in this module -- the
    # generator above lost its original name to the obfuscation, so this
    # demo raises NameError when run.
    for i, job in enumerate(fetch_jobs('Bangalore'), 1):
        print(F"Job {i:>2} is {job[0]} at {job[1]}")
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Script: initialize a fresh causal-LM (codeparrot) from a config + tokenizer.
# The mangled original assigned every object to `lowerCamelCase_` while later
# lines read `parser`/`tokenizer`/`config_kwargs`/`config`/`model`; the
# intended names are restored.

# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    'vocab_size': len(tokenizer),
    'scale_attn_by_inverse_layer_idx': True,
    'reorder_and_upcast_attn': True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
lowerCamelCase_ : Union[str, Any] = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
lowerCamelCase_ : Optional[Any] = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
lowerCamelCase_ : int = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _UpperCAmelCase(datasets.Metric):
    """BLEU metric wrapper around the TensorFlow-NMT reference implementation.

    NOTE(review): `_DESCRIPTION`/`_CITATION`/`_KWARGS_DESCRIPTION` are the
    names the decorator and body expect, but in this obfuscated module the
    constants above lost those names -- restore them module-wide.  The two
    methods were both named ``lowerCamelCase_`` and the compute signature had
    duplicate ``snake_case_`` parameters (a SyntaxError); the standard
    ``datasets.Metric`` hook names and parameters are restored.
    """

    def _info(self):
        """Describe the metric's inputs, outputs, and references."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string', id='token'), id='sequence'),
                    'references': datasets.Sequence(
                        datasets.Sequence(datasets.Value('string', id='token'), id='sequence'), id='references'
                    ),
                }
            ),
            codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/BLEU',
                'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        """Compute corpus BLEU for tokenized predictions against references."""
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
lowerCamelCase_ : Any = re.compile(r'\s+')
def UpperCAmelCase__(example):
    """Return the md5 hash of the file's whitespace-stripped content.

    Fixes two mangling defects: ``hashlib.mda`` (no such digest; md5 is
    intended) and the regex argument, which had been replaced by the function
    parameter.  The whitespace pattern is inlined because the module-level
    compiled pattern lost its name.
    """
    content_no_ws = re.sub(r'\s+', '', example['content'])
    return {"hash": hashlib.md5(content_no_ws.encode('utf-8')).hexdigest()}
def UpperCAmelCase__(example):
    """Return mean and max line length of the file's content.

    The mangled original measured ``len(<example dict>)`` for every line;
    restored to collect per-line lengths.
    """
    line_lengths = [len(line) for line in example['content'].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def UpperCAmelCase__(example):
    """Return the fraction of alphanumeric characters in the content.

    The mangled original read an undefined ``example`` while the parameter
    was named ``_UpperCAmelCase``; the intended parameter name is restored.
    """
    alpha_frac = np.mean([c.isalnum() for c in example['content']])
    return {"alpha_frac": alpha_frac}
def UpperCAmelCase__(example, uniques):
    """Consume the example's hash from `uniques`; True only on first sighting.

    Mutates `uniques` in place (removal marks the hash as seen).  The mangled
    original declared both parameters as ``_UpperCAmelCase`` (a SyntaxError).
    """
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False
def UpperCAmelCase__(example, scan_width=5):
    """Flag files whose first `scan_width` lines mention auto-generation."""
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    # Only scan the leading `scan_width` lines.
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    return {"autogenerated": False}
def UpperCAmelCase__(example, scan_width=5, coeff=0.05):
    """Flag probable configuration files or unit tests.

    Two heuristics: (1) explicit keywords within the first `scan_width`
    lines; (2) 'config'/'test' occurrences exceeding `coeff` * line count.
    """
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['content'].count('\n')
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count('config')
        count_test += line.lower().count('test')
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def UpperCAmelCase__(example):
    """Check that the file has none of: function, class, for-loop, while-loop."""
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def UpperCAmelCase__(example, minimum=4):
    """Check that the file uses '=' at most `minimum` times."""
    lines = example['content'].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count('=')
        # Early exit as soon as the threshold is exceeded.
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def UpperCAmelCase__(example):
    """Compute the character-to-token ratio of the content.

    Fixes the mangled ``truncation=`` argument (the original passed the
    example dict itself; the upstream script passes True).
    NOTE(review): relies on a module-global ``tokenizer`` that must be
    restored under that name elsewhere in this module.
    """
    input_ids = tokenizer(example['content'], truncation=True)['input_ids']
    ratio = len(example['content']) / len(input_ids)
    return {"ratio": ratio}
def UpperCAmelCase__(example):
    """Chain all preprocessing heuristics and merge their result dicts.

    NOTE(review): the helper names below are the ones the original script
    used; in this obfuscated module the helpers were all renamed to a single
    colliding identifier, so the module-wide wiring must be restored for this
    to run.
    """
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def UpperCAmelCase__(example, uniques, args):
    """Filter predicate combining the deduplication and quality heuristics.

    The mangled original declared three parameters all named
    ``_UpperCAmelCase`` (a SyntaxError); names are restored from the
    attribute accesses the body already made (``args.line_max`` ...).
    NOTE(review): `check_uniques` must be restored under that name
    module-wide for the first branch to resolve.
    """
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    # Probabilistic keeps: drop only a `filter_proba` fraction of these.
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def UpperCAmelCase__(file_path):
    """Gzip-compress `file_path` to `file_path`.gz (level 6), then delete the original."""
    with open(file_path, 'rb') as source, gzip.open(str(file_path) + '.gz', 'wb', compresslevel=6) as target:
        shutil.copyfileobj(source, target)
    os.unlink(file_path)
# Driver: load, preprocess, deduplicate, filter, and shard the dataset.
# The mangled original assigned every binding to `lowerCamelCase_` while
# later lines read the real names (`args`, `ds`, `uniques`, `ds_filter`,
# `t_start`, ...); those names are restored.
# NOTE(review): `preprocess`, `filter`, and `compress_file` are the names the
# original script used for the helpers above; restore them module-wide.

# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique('hash'))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / 'duplicate_clusters.json', 'w') as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
"""simple docstring"""
from math import factorial, pi
def UpperCAmelCase__(theta, accuracy=30):
    """Approximate sin(theta) with `accuracy` terms of the Maclaurin series.

    theta is first reduced modulo 2*pi so the series converges quickly.
    Raises ValueError for non-numeric theta or a non-positive/non-int
    accuracy.  The mangled original declared both parameters as
    ``_UpperCAmelCase`` (a SyntaxError); the names used by the body
    (``theta``, ``accuracy``) are restored.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_sin() requires either an int or float for theta')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_sin() requires a positive int for accuracy')
    theta = float(theta)
    # Range reduction: subtract whole multiples of 2*pi.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )
def UpperCAmelCase__(theta, accuracy=30):
    """Approximate cos(theta) with `accuracy` terms of the Maclaurin series.

    theta is first reduced modulo 2*pi so the series converges quickly.
    Raises ValueError for non-numeric theta or a non-positive/non-int
    accuracy.  Parameter names restored from the mangled duplicate
    ``_UpperCAmelCase`` signature (a SyntaxError in the original).
    """
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_cos() requires either an int or float for theta')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_cos() requires a positive int for accuracy')
    theta = float(theta)
    # Range reduction: subtract whole multiples of 2*pi.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
    # NOTE(review): `maclaurin_sin` / `maclaurin_cos` are not defined under
    # those names in this module (the functions above lost their names to the
    # obfuscation), so this demo raises NameError when run.
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[Any] = logging.get_logger(__name__)
def UpperCAmelCase__ ( config , base_model=False ):
    """Build (timm_key, hf_key) rename pairs for a ViT-hybrid checkpoint.

    Args:
        config: ViTHybridConfig exposing `backbone_config.depths` and
            `num_hidden_layers`.
        base_model: when True, target the bare ViTHybridModel (no "vit." prefix,
            final layernorm + pooler instead of a classification head).

    Returns:
        list[tuple[str, str]] of (old_key, new_key) state-dict renames.
    """
    rename_keys = []
    # stem: class token, position embeddings and patch projection
    rename_keys.append(("cls_token", "vit.embeddings.cls_token"))
    rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings"))
    rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"))
    # backbone stem
    rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight"))
    rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias"))
    # backbone stages: per-layer conv/norm blocks, then each stage's downsample
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            for param in (
                "conv1.weight", "norm1.weight", "norm1.bias",
                "conv2.weight", "norm2.weight", "norm2.bias",
                "conv3.weight", "norm3.weight", "norm3.bias",
            ):
                rename_keys.append(
                    (
                        f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.{param}",
                        f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.{param}",
                    )
                )
        for param in ("downsample.conv.weight", "downsample.norm.weight", "downsample.norm.bias"):
            rename_keys.append(
                (
                    f"patch_embed.backbone.stages.{stage_idx}.blocks.0.{param}",
                    f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.{param}",
                )
            )
    # transformer encoder: attention output projection, MLP and both layernorms
    for i in range(config.num_hidden_layers):
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))
    if base_model:
        # bare model: final layernorm + pooler, then strip the "vit." prefix
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # classification model: final layernorm + linear head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys
def UpperCAmelCase__ ( state_dict , config , base_model=False ):
    """Split timm's fused qkv projection into separate HF query/key/value entries.

    Mutates *state_dict* in place: pops `blocks.{i}.attn.qkv.{weight,bias}` and
    writes the q/k/v slices under the HF attention key names.

    Args:
        state_dict: timm state dict (modified in place).
        config: config exposing `num_hidden_layers` and `hidden_size`.
        base_model: when True, omit the "vit." key prefix.
    """
    for i in range(config.num_hidden_layers):
        prefix = "" if base_model else "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase__ ( state_dict ):
    """Drop timm classification-head weights from *state_dict* in place.

    Missing keys are ignored, so the call is safe on headless checkpoints.
    """
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def UpperCAmelCase__ ( dct , old , new ):
    """Move the value stored under *old* to *new* in *dct*, in place.

    Raises:
        KeyError: if *old* is not present.
    """
    val = dct.pop(old)
    dct[new] = val
def UpperCAmelCase__ ( ):
    """Fetch the standard COCO "two cats" test image used to verify the conversion.

    Returns:
        PIL.Image.Image downloaded from the COCO val2017 set.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def UpperCAmelCase__ ( vit_name , pytorch_dump_folder_path , push_to_hub=False ):
    """Convert a timm ViT-hybrid checkpoint into the HF ViTHybrid format.

    Downloads the timm weights, renames/splits the state dict, verifies the HF
    model reproduces timm's outputs, then optionally saves and/or pushes the
    converted model and image processor.

    Args:
        vit_name: timm model name (e.g. "vit_base_r50_s16_384").
        pytorch_dump_folder_path: output directory; skipped when None.
        push_to_hub: also push the artifacts to the Hugging Face Hub.
    """
    # define default ViT hybrid configuration (BiT backbone feeding a ViT encoder)
    backbone_config = BitConfig(
        global_padding='same',
        layer_type='bottleneck',
        depths=(3, 4, 9),
        out_features=['stage3'],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # attach ImageNet-1k label mapping to the config
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create an image processor mirroring the timm transform pipeline
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={'shortest_edge': timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors='pt').pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print('Predicted class:', logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"""Saving processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"""Pushing model and processor to the hub {vit_name}""")
        model.push_to_hub(f"""ybelkada/{vit_name}""")
        processor.push_to_hub(f"""ybelkada/{vit_name}""")
if __name__ == "__main__":
    # Command-line entry point for the conversion script.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--vit_name',
        default='vit_base_r50_s16_384',
        type=str,
        help='Name of the hybrid ViT timm model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
    )
    args = parser.parse_args()
    # NOTE(review): `convert_vit_checkpoint` is the intended name of the conversion
    # function defined above (mangled to `UpperCAmelCase__` in this copy).
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): obfuscated binding — originally presumably `logger`.
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> config URL.
# NOTE(review): obfuscated binding — by HF convention this would be
# `INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP`.
lowerCamelCase_ : Optional[Any] = {
    'huggingface/informer-tourism-monthly': (
        'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration for an Informer time-series transformer.

    Holds the time-series feature settings (lags, static/dynamic feature counts,
    cardinalities and embedding sizes) plus the encoder/decoder transformer
    hyper-parameters and the Informer-specific probabilistic-attention options
    (``attention_type``, ``sampling_factor``, ``distil``).
    """

    # Restored per the HF PretrainedConfig convention (the obfuscated copy bound
    # both values to the same mangled name, so the second shadowed the first).
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output="student_t",
        loss="nll",
        input_size=1,
        lags_sequence=None,
        scaling="mean",
        num_dynamic_real_features=0,
        num_static_categorical_features=0,
        num_static_real_features=0,
        num_time_features=0,
        cardinality=None,
        embedding_dimension=None,
        d_model=64,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        encoder_layers=2,
        decoder_layers=2,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        is_encoder_decoder=True,
        activation_function="gelu",
        dropout=0.05,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        attention_type="prob",
        sampling_factor=5,
        distil=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer-specific attention settings
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of extra input features fed alongside the lagged values."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
"""simple docstring"""
def UpperCAmelCase__ ( num ):
    """Convert an integer to its binary string representation.

    Mirrors the output of ``bin()``: e.g. 5 -> "0b101", -5 -> "-0b101", 0 -> "0b0".

    Args:
        num: the integer to convert.

    Raises:
        TypeError: if *num* is a float or a str.
    """
    if isinstance(num, float):
        raise TypeError('\'float\' object cannot be interpreted as an integer')
    if isinstance(num, str):
        raise TypeError('\'str\' object cannot be interpreted as an integer')
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    # Collect bits least-significant first, then reverse once
    # (avoids the O(n^2) cost of repeated list.insert(0, ...)).
    binary: list[int] = []
    while num > 0:
        binary.append(num % 2)
        num >>= 1
    binary.reverse()
    bits = "".join(str(e) for e in binary)
    return ("-0b" if negative else "0b") + bits
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
"""simple docstring"""
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , snake_case_ ):
"""simple docstring"""
A_ : Dict = size
A_ : Any = [0] * size
A_ : Dict = [0] * size
@staticmethod
def lowerCamelCase_ ( snake_case_ ):
"""simple docstring"""
return index | (index + 1)
@staticmethod
def lowerCamelCase_ ( snake_case_ ):
"""simple docstring"""
return (index & (index + 1)) - 1
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : List[str] = value
while index < self.size:
A_ : List[str] = self.get_prev(snake_case_ ) + 1
if current_left_border == index:
A_ : Dict = value
else:
A_ : Dict = max(snake_case_ , snake_case_ , snake_case_ )
A_ : Tuple = self.get_next(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
right -= 1 # Because of right is exclusive
A_ : Tuple = 0
while left <= right:
A_ : Any = self.get_prev(snake_case_ )
if left <= current_left:
A_ : Union[str, Any] = max(snake_case_ , self.tree[right] )
A_ : Dict = current_left
else:
A_ : str = max(snake_case_ , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
"""MLuke lazy-import module: exposes MLukeTokenizer when sentencepiece is available."""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# Submodule -> public names; consumed by _LazyModule at the bottom of the file.
# (Restored name: the obfuscated copy bound this dict and the sys.modules
# assignment to throwaway names, breaking the lazy-import machinery.)
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
def UpperCAmelCase__ ( filename = "input.txt" ):
    """Project Euler 82: minimal path sum through a matrix, moving right/up/down.

    Reads a comma-separated integer matrix from *filename* (resolved relative to
    this script's directory). The path starts in any cell of the left column and
    ends in any cell of the right column.

    Returns:
        The minimal path sum as an int.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(',')]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    # First column: the path starts here, so the cost is the cell itself.
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    # Sweep column by column: seed with the move-right cost, then relax
    # downward (top-to-bottom) and upward (bottom-to-top) movements.
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
    # NOTE(review): `solution` is the intended name of the obfuscated
    # `UpperCAmelCase__` defined above; this call raises NameError as written.
    print(F"{solution() = }")
"""simple docstring"""
import os

# Precomputes a list of the 100 first triangular numbers: T(n) = n * (n + 1) / 2.
# Restored name: the word-counting solution below reads TRIANGULAR_NUMBERS, but the
# obfuscated copy bound this list to a throwaway name.
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]
def UpperCAmelCase__ ( ):
    """Project Euler 42: count the triangle words listed in ``words.txt``.

    A word's value is the sum of its letter positions (A=1, ..., Z=26); a word is
    a "triangle word" when that value is a triangular number. The file holds one
    line of comma-separated, double-quoted uppercase words and is resolved
    relative to this script's directory.

    Returns:
        The number of triangle words in the file.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(script_dir, 'words.txt')) as f:
        line = f.readline()
    words = [word.strip('"') for word in line.strip('\r\n').split(',')]
    # NOTE(review): TRIANGULAR_NUMBERS is the module-level list of the first 100
    # triangular numbers (its binding was name-mangled in this copy).
    triangular = set(TRIANGULAR_NUMBERS)  # set for O(1) membership tests
    word_values = (sum(ord(ch) - 64 for ch in word) for word in words)
    return sum(1 for value in word_values if value in triangular)
if __name__ == "__main__":
    # NOTE(review): `solution` is the intended name of the obfuscated
    # `UpperCAmelCase__` defined above; this call raises NameError as written.
    print(solution())
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def UpperCAmelCase__ ( tax_checkpoint_path ):
    """Load a T5X checkpoint and return its parameters flattened to a single-level dict.

    Args:
        tax_checkpoint_path: filesystem path of the original T5X checkpoint.

    Returns:
        dict mapping tuple key-paths to parameter arrays (see flax flatten_dict).
    """
    flax_params = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def UpperCAmelCase__ ( flax_dict ):
    """Rename flattened T5X/Flax parameter keys to HF Pix2Struct names and convert to torch.

    Args:
        flax_dict: flat dict mapping tuple key-paths (as produced by flatten_dict)
            to numpy-like arrays; only entries containing a "target" component are kept.

    Returns:
        dict mapping dotted HF parameter names to torch tensors (weights transposed,
        except embeddings).
    """
    converted_dict = {}
    CONVERSION_MAPPING = {
        'token_embedder': 'embeddings',
        'encoder_norm': 'layernorm',
        'kernel': 'weight',
        '.out': '.output',
        'scale': 'weight',
        'embedders_0.pos_embedding': 'row_embedder.weight',
        'embedders_1.pos_embedding': 'column_embedder.weight',
    }
    DECODER_CONVERSION_MAPPING = {
        'query': 'attention.query',
        'key': 'attention.key',
        'value': 'attention.value',
        'output.dense': 'output',
        'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
        'pre_self_attention_layer_norm': 'self_attention.layer_norm',
        'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
        'mlp.': 'mlp.DenseReluDense.',
        'pre_mlp_layer_norm': 'mlp.layer_norm',
        'self_attention.o': 'self_attention.attention.o',
        'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
        'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
        'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.logits_dense.weight': 'decoder.lm_head.weight',
    }
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key and join the rest with dots
            new_key = '.'.join(key[1:])
            # rename the key with the shared mapping, then decoder-specific fixes
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)', r'layer.\1', new_key)
                new_key = new_key.replace('encoder', 'encoder.encoder')
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)', r'layer.\1', new_key)
            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format (transpose all but embeddings)
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
    return converted_torch_dict
def UpperCAmelCase__ ( tax_checkpoint_path , pytorch_dump_folder_path , use_large=False , is_vqa=False ):
    """Convert a T5X Pix2Struct checkpoint to the HF format and save it locally.

    Args:
        tax_checkpoint_path: path to the original T5X checkpoint.
        pytorch_dump_folder_path: output directory (created if missing).
        use_large: build the "large" architecture variant.
        is_vqa: configure the model for VQA-style conditioning.
    """
    flax_params = get_flax_param(tax_checkpoint_path)

    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = PixaStructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer')
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # NOTE(review): attribute targets inferred from the obfuscated bare
        # assignments (4096 / True) — confirm names against the upstream
        # conversion script before relying on them.
        image_processor.max_patches = 4096
        image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
    print('Model saved in {}'.format(pytorch_dump_folder_path))
if __name__ == "__main__":
lowerCamelCase_ : Any = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
parser.add_argument('--is_vqa', action='store_true', help='Use large model.')
lowerCamelCase_ : List[str] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
) | 286 |
"""XLNet lazy-import module: wires config, tokenizers and PyTorch/TF models into _LazyModule."""
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Submodule -> public names; consumed by _LazyModule at the bottom of the file.
# (Restored name: the obfuscated copy bound every _import_structure update and the
# final sys.modules assignment to throwaway names, breaking lazy importing.)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_xlnet'] = [
        'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XLNetForMultipleChoice',
        'XLNetForQuestionAnswering',
        'XLNetForQuestionAnsweringSimple',
        'XLNetForSequenceClassification',
        'XLNetForTokenClassification',
        'XLNetLMHeadModel',
        'XLNetModel',
        'XLNetPreTrainedModel',
        'load_tf_weights_in_xlnet',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_xlnet'] = [
        'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFXLNetForMultipleChoice',
        'TFXLNetForQuestionAnsweringSimple',
        'TFXLNetForSequenceClassification',
        'TFXLNetForTokenClassification',
        'TFXLNetLMHeadModel',
        'TFXLNetMainLayer',
        'TFXLNetModel',
        'TFXLNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=3_0 , snake_case_=4_0_0 , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , snake_case_=True , snake_case_=1 / 2_5_5 , snake_case_=True , ):
"""simple docstring"""
A_ : Optional[Any] = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
A_ : Dict = parent
A_ : Optional[int] = batch_size
A_ : Optional[int] = num_channels
A_ : Dict = min_resolution
A_ : str = max_resolution
A_ : Union[str, Any] = do_resize
A_ : Tuple = size
A_ : List[str] = do_normalize
A_ : Tuple = image_mean
A_ : Dict = image_std
A_ : Optional[int] = do_rescale
A_ : Any = rescale_factor
A_ : List[str] = do_pad
def lowerCamelCase_ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCamelCase_ ( self , snake_case_ , snake_case_=False ):
"""simple docstring"""
if not batched:
A_ : Any = image_inputs[0]
if isinstance(snake_case_ , Image.Image ):
A_ , A_ : Optional[Any] = image.size
else:
A_ , A_ : int = image.shape[1], image.shape[2]
if w < h:
A_ : Dict = int(self.size['shortest_edge'] * h / w )
A_ : str = self.size['shortest_edge']
elif w > h:
A_ : int = self.size['shortest_edge']
A_ : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
A_ : Optional[int] = self.size['shortest_edge']
A_ : Any = self.size['shortest_edge']
else:
A_ : Optional[int] = []
for image in image_inputs:
A_ , A_ : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A_ : Tuple = max(snake_case_ , key=lambda snake_case_ : item[0] )[0]
A_ : List[Any] = max(snake_case_ , key=lambda snake_case_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowercase_ : List[str] = DeformableDetrImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Dict = DeformableDetrImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , 'image_mean' ) )
self.assertTrue(hasattr(snake_case_ , 'image_std' ) )
self.assertTrue(hasattr(snake_case_ , 'do_normalize' ) )
self.assertTrue(hasattr(snake_case_ , 'do_resize' ) )
self.assertTrue(hasattr(snake_case_ , 'do_rescale' ) )
self.assertTrue(hasattr(snake_case_ , 'do_pad' ) )
self.assertTrue(hasattr(snake_case_ , 'size' ) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , snake_case_ )
A_ : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=snake_case_ )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2, 'longest_edge': 8_4} )
self.assertEqual(image_processor.do_pad , snake_case_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
A_ , A_ : Optional[int] = self.image_processor_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_ )
A_ : str = image_processing(snake_case_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def lowerCamelCase_ ( self ):
        """Smoke-test numpy-array inputs: single image and batched output shapes.

        NOTE(review): same name mangling as the PIL variant -- ``A_`` and
        ``snake_case_`` stand in for the real locals/flags.
        """
        A_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        A_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
        for image in image_inputs:
            self.assertIsInstance(snake_case_ , np.ndarray )
        # Test not batched input
        A_ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        A_ , A_ : List[Any] = self.image_processor_tester.get_expected_values(snake_case_ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        A_ : Dict = image_processing(snake_case_ , return_tensors='pt' ).pixel_values
        A_ , A_ : List[str] = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def lowerCamelCase_ ( self ):
        """Smoke-test torch-tensor inputs: single image and batched output shapes.

        NOTE(review): same name mangling as the PIL/numpy variants.
        """
        A_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        A_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
        for image in image_inputs:
            self.assertIsInstance(snake_case_ , torch.Tensor )
        # Test not batched input
        A_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        A_ , A_ : Tuple = self.image_processor_tester.get_expected_values(snake_case_ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        A_ : List[Any] = image_processing(snake_case_ , return_tensors='pt' ).pixel_values
        A_ , A_ : Optional[int] = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def lowerCamelCase_ ( self ):
        """Integration test: COCO detection annotations are encoded to the
        expected pixel values, boxes, labels and sizes.

        NOTE(review): ``A_`` targets shadow the real locals (``image``,
        ``target``, ``encoding``, expected tensors) that the assertions read,
        and bare ``snake_case_`` arguments are mangled references to them.
        """
        A_ : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
            A_ : List[str] = json.loads(f.read() )
        A_ : Union[str, Any] = {'image_id': 3_9_7_6_9, 'annotations': target}
        # encode them
        A_ : Optional[int] = DeformableDetrImageProcessor()
        A_ : Optional[Any] = image_processing(images=snake_case_ , annotations=snake_case_ , return_tensors='pt' )
        # verify pixel values
        A_ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding['pixel_values'].shape , snake_case_ )
        A_ : str = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , snake_case_ , atol=1E-4 ) )
        # verify area
        A_ : Optional[Any] = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , snake_case_ ) )
        # verify boxes
        A_ : Optional[Any] = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , snake_case_ )
        A_ : List[str] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , snake_case_ , atol=1E-3 ) )
        # verify image_id
        A_ : Union[str, Any] = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , snake_case_ ) )
        # verify is_crowd
        A_ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , snake_case_ ) )
        # verify class_labels
        A_ : Dict = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , snake_case_ ) )
        # verify orig_size
        A_ : str = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , snake_case_ ) )
        # verify size
        A_ : List[Any] = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , snake_case_ ) )
    @slow
    def lowerCamelCase_ ( self ):
        """Integration test: COCO *panoptic* annotations (with mask files) are
        encoded to the expected pixel values, boxes, labels, masks and sizes.

        NOTE(review): same ``A_``/``snake_case_`` name mangling as the
        detection-annotation test above.
        """
        A_ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
            A_ : int = json.loads(f.read() )
        A_ : Optional[Any] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
        A_ : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
        # encode them
        A_ : Any = DeformableDetrImageProcessor(format='coco_panoptic' )
        A_ : Union[str, Any] = image_processing(images=snake_case_ , annotations=snake_case_ , masks_path=snake_case_ , return_tensors='pt' )
        # verify pixel values
        A_ : int = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding['pixel_values'].shape , snake_case_ )
        A_ : int = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , snake_case_ , atol=1E-4 ) )
        # verify area
        A_ : List[Any] = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , snake_case_ ) )
        # verify boxes
        A_ : Union[str, Any] = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , snake_case_ )
        A_ : Optional[int] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , snake_case_ , atol=1E-3 ) )
        # verify image_id
        A_ : Any = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , snake_case_ ) )
        # verify is_crowd
        A_ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , snake_case_ ) )
        # verify class_labels
        A_ : Optional[int] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , snake_case_ ) )
        # verify masks
        A_ : Tuple = 8_2_2_8_7_3
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() , snake_case_ )
        # verify orig_size
        A_ : List[Any] = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , snake_case_ ) )
        # verify size
        A_ : Dict = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , snake_case_ ) )
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class _UpperCAmelCase ( DiffusionPipeline ):
    """Minimal custom pipeline: runs one denoising step, then returns an
    all-ones tensor shaped like the scheduler output.

    Fixes: the base class and the call arguments were mangled to unbound
    names (``UpperCAmelCase__`` / ``snake_case_``); ``DiffusionPipeline`` is
    the only base imported by this module, and the locals are restored from
    how they are consumed below.
    """

    def __init__( self , unet , scheduler ):
        """Register the UNet and scheduler as pipeline modules."""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    def __call__( self ):
        """Run a single denoise step and return ones_like(scheduler output)."""
        # Random starting sample with the UNet's configured channel count / size.
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        model_output = self.unet(sample , timestep ).sample
        scheduler_output = self.scheduler.step(model_output , timestep , sample ).prev_sample
        # x - x + 1 == ones, but keeps the graph dependent on the scheduler output.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
        return result
"""simple docstring"""
from __future__ import annotations
import time
# Fix: these three module constants were all mangled to one reused name; the
# search classes below reference them as `grid` and `delta`, so restore those
# names (the path type alias is named `Path` per the usual convention).
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """A BFS search-tree node: a grid position plus a link to its parent.

    ``pos`` is stored as ``(pos_y, pos_x)`` -- row first -- to match the
    ``grid[y][x]`` indexing used by the searches below.

    Fixes: the mangled ``__init__`` repeated one parameter name five times
    (a SyntaxError) and assigned to throwaway locals instead of the instance
    attributes (``pos``, ``pos_x``, ``parent``, ...) that the search classes
    read.  The class is renamed ``Node`` because that is the name the search
    code instantiates.
    """

    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # (row, col)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    """Uni-directional BFS over the module-level ``grid`` using ``delta`` moves.

    Fixes: duplicate parameter names (SyntaxError) and state that was assigned
    to dead locals instead of the ``self.start`` / ``self.target`` /
    ``self.node_queue`` / ``self.reached`` attributes the methods read.  Class
    and method names are restored to the names used by the ``__main__`` block
    and by the internal calls (``search``, ``get_successors``,
    ``retrace_path``).
    """

    def __init__(self, start, goal):
        # Node takes (pos_x, pos_y, ...), while start/goal are (y, x) pairs.
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self):
        """Run BFS; return the path as a list of (y, x) cells, or [start]."""
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            # No path exists: fall back to just the start position.
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        """Expand in-bounds, obstacle-free neighbours of *parent*."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors

    def retrace_path(self, node):
        """Walk parent links back from *node*; return start -> node positions."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """Two BFS frontiers advanced in lock-step from start and goal.

    Fixes: duplicate parameter names and dead-local assignments restored to
    the attributes/targets the algorithm needs (each frontier is re-targeted
    at the other's current node every iteration).  Class/method names match
    the ``__main__`` block and the internal ``retrace_bidirectional_path``
    call.
    """

    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self):
        """Alternate the two frontiers until they meet; return the joined path."""
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            # Aim each search at the other frontier's current node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        """Join the forward path with the reversed backward path (no dup node)."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()  # drop the meeting node, already in fwd_path
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest
    doctest.testmod()
    # Search from the top-left corner to the bottom-right corner of the grid.
    lowerCamelCase_ : Any = (0, 0)
    lowerCamelCase_ : str = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    # Time the uni-directional search.
    lowerCamelCase_ : List[str] = time.time()
    lowerCamelCase_ : Tuple = BreadthFirstSearch(init, goal)
    lowerCamelCase_ : int = bfs.search()
    lowerCamelCase_ : List[Any] = time.time() - start_bfs_time
    print('Unidirectional BFS computation time : ', bfs_time)
    # Time the bi-directional search on the same endpoints.
    # NOTE(review): the mangled `lowerCamelCase_` targets shadow the
    # `init`/`goal`/`bfs`/`bd_bfs`/... names the statements below read.
    lowerCamelCase_ : Optional[Any] = time.time()
    lowerCamelCase_ : List[str] = BidirectionalBreadthFirstSearch(init, goal)
    lowerCamelCase_ : Dict = bd_bfs.search()
    lowerCamelCase_ : Union[str, Any] = time.time() - start_bd_bfs_time
    print('Bidirectional BFS computation time : ', bd_bfs_time)
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def UpperCAmelCase__(grid, source, destination, allow_diagonal):
    """Dijkstra shortest path on a 0/1 numpy *grid* (cells equal to 1 are walkable).

    Args:
        grid: 2-D numpy array; a cell value of 1 is passable, anything else blocks.
        source / destination: (row, col) tuples.
        allow_diagonal: also expand the four diagonal neighbours when True.

    Returns:
        ``(distance, path)`` where *path* lists (row, col) cells from source to
        destination, or ``(np.inf, [])`` when the destination is unreachable.
        All step costs are 1.

    Fixes: the mangled signature repeated one parameter name four times
    (SyntaxError) and the heap/matrix/predecessor bookkeeping was collapsed to
    dead ``A_`` locals; names are restored from the surviving references
    (``matrix``, ``predecessors``, ``dist``, ``allow_diagonal``, ...).
    """
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    # Min-heap of (distance, position); `visited` guards against stale entries.
    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)  # best known distance per cell
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                # Relax the edge only for walkable cells that improve on the
                # best distance found so far.
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ : List[str] = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Rename one block-indexed state-dict key, shifting its block number.

    Example: with ``offset=1``, ``"...2.0.mlp.fc1..."`` becomes
    ``"...block.1.0.output.conv1..."``.

    Fixes: the function is called as ``replace_key_with_offset`` by
    ``rename_keys`` below, and the mangled signature repeated one parameter
    name four times (a SyntaxError); parameter names are restored from how the
    body uses them.
    """
    to_find = original_name.split('.')[0]
    key_list = key.split('.')
    # The block and layer numbers sit directly before `to_find` in the key.
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    """Map original PoolFormer checkpoint keys onto HF ``PoolFormer*`` names.

    Fixes: the function is called as ``rename_keys`` later in this script, and
    the first two arguments of the inner ``replace_key_with_offset`` calls were
    mangled to unbound names -- they are the current ``key`` and the running
    ``patch_emb_offset``.
    """
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('network'):
            key = key.replace('network', 'poolformer.encoder')
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('bias') and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('proj')]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace('proj', 'projection')
            if key.endswith('bias'):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = 'poolformer.encoder.' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc1', 'output.conv1')
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'mlp.fc2', 'output.conv2')
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm1', 'before_norm')
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'norm2', 'after_norm')
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_1', 'layer_scale_1')
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, 'layer_scale_2', 'layer_scale_2')
        if "head" in key:
            key = key.replace('head', 'classifier')
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Download the standard COCO test image used to sanity-check model outputs.

    Fixes: renamed to ``prepare_img`` (the name this script calls it by) and
    the unbound request arguments restored (``url``; ``stream=True`` so PIL
    can read from the raw response).
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Convert an original PoolFormer checkpoint into HF format and save it.

    Builds the matching ``PoolFormerConfig`` from *model_name* (e.g.
    ``poolformer_s12``), renames the state-dict keys, verifies a logit slice
    against reference values, then saves model and image processor to
    *pytorch_dump_folder_path*.

    Fixes: renamed to the name the ``__main__`` block calls, duplicate
    parameter names removed, and the many dead ``A_`` locals restored to the
    config attributes / locals that the later statements actually read.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    size = model_name[-3:]
    config.num_labels = 1000
    filename = 'imagenet-1k-id2label.json'
    expected_shape = (1, 1000)

    # set config attributes (label maps come from the HF hub dataset repo)
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    # NOTE(review): this duplicates the instance created above -- kept for
    # parity with the original script.
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors='pt').pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3_045, -0.6_758, -0.4_869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4_402, -0.1_374, -0.8_045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6_080, -0.5_133, -0.5_898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3_952, 0.2_263, -1.2_668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1_167, -0.0_656, -0.3_423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: convert one PoolFormer checkpoint to HF format.
    lowerCamelCase_ : str = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='poolformer_s12',
        type=str,
        help='Name of the model you\'d like to convert.',
    )
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    # NOTE(review): the mangled `lowerCamelCase_` targets shadow the
    # `parser`/`args` names the statements below read.
    lowerCamelCase_ : Union[str, Any] = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the map of known pretrained Informer configs.
# NOTE(review): both constants were mangled to the same name, so the second
# assignment clobbers the logger; presumably they were `logger` and
# `INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP` -- confirm upstream.
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase_ : Optional[Any] = {
    'huggingface/informer-tourism-monthly': (
        'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """Configuration for an Informer time-series transformer.

    Fixes: the mangled ``__init__`` repeated the parameter name ``snake_case_``
    dozens of times (a SyntaxError) and assigned to dead locals; parameter
    names are restored from the assignment order and the body's surviving
    references (e.g. ``num_time_features``, ``temperature``-style checks).
    The two class attributes were both mangled to one name (the second
    clobbered the first); they are restored to the ``PretrainedConfig``
    convention.  The property is renamed ``_number_of_features`` because the
    constructor references it by that name.
    """

    model_type = """informer"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
        """num_hidden_layers""": """encoder_layers""",
    }

    def __init__(
        self,
        prediction_length = None,
        context_length = None,
        distribution_output = "student_t",
        loss = "nll",
        input_size = 1,
        lags_sequence = None,
        scaling = "mean",
        num_dynamic_real_features = 0,
        num_static_categorical_features = 0,
        num_static_real_features = 0,
        num_time_features = 0,
        cardinality = None,
        embedding_dimension = None,
        d_model = 64,
        encoder_ffn_dim = 32,
        decoder_ffn_dim = 32,
        encoder_attention_heads = 2,
        decoder_attention_heads = 2,
        encoder_layers = 2,
        decoder_layers = 2,
        is_encoder_decoder = True,
        activation_function = "gelu",
        dropout = 0.05,
        encoder_layerdrop = 0.1,
        decoder_layerdrop = 0.1,
        attention_dropout = 0.1,
        activation_dropout = 0.1,
        num_parallel_samples = 100,
        init_std = 0.02,
        use_cache=True,
        attention_type = "prob",
        sampling_factor = 5,
        distil = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self):
        """Total width of the per-timestep feature vector fed to the model."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
"""simple docstring"""
def get_data(source_data):
    """Transpose row-wise records into per-attribute columns of floats.

    ``[[1, 2], [3, 4]]`` becomes ``[[1.0, 3.0], [2.0, 4.0]]``.

    Fixes: renamed to ``get_data`` (the name the wrapper below calls), and the
    unbound ``_UpperCAmelCase`` references restored to ``data`` / ``el`` /
    ``data_lists`` per the loop structure.
    """
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            # Grow one column list per attribute as needed.
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists, weights):
    """Min-max normalise each attribute column to [0, 1].

    Weight 1 means "higher is better"; weight 0 inverts the score ("lower is
    better").  Any other weight raises ``ValueError``.  A constant column
    (max == min) scores 1 for weight 0 and 0 for weight 1.

    Fixes: renamed to ``calculate_each_score`` (the name the wrapper calls)
    and the duplicate-parameter SyntaxError resolved to
    ``(data_lists, weights)``.
    """
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists
def generate_final_scores(score_lists):
    """Sum the per-attribute scores element-wise into one score per record.

    Fixes: renamed to ``generate_final_scores`` (the name the wrapper calls)
    and the unbound ``enumerate`` argument restored to the current column
    ``slist``.
    """
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def UpperCAmelCase__(source_data, weights):
    """Score each record by weighted percentual proximity and append the score
    to that record in place (the mutated *source_data* is also returned).

    Fixes: the duplicate-parameter SyntaxError resolved to
    ``(source_data, weights)`` and the unbound call arguments restored to the
    pipeline locals (``data_lists`` -> ``score_lists`` -> ``final_scores``).
    """
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
"""simple docstring"""
import os
def solution():
    """Return the first ten digits of the sum of the numbers in ``num.txt``
    (Project Euler problem 13), as a string.

    Fixes: renamed to ``solution`` (the name the ``__main__`` guard calls) and
    the unbound ``_UpperCAmelCase`` references restored: the data file lives
    next to this module (``__file__``) and each line is parsed as an int.
    """
    file_path = os.path.join(os.path.dirname(__file__), 'num.txt')
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
lowerCamelCase_ : List[str] = logging.get_logger(__name__)
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """Feature extractor that turns HTML strings into parallel lists of text
    nodes and their XPath expressions (requires the bs4 backend).

    Fixes: the three worker methods were all mangled to one name while the
    code calls them as ``self.xpath_soup`` / ``self.get_three_from_single`` /
    ``self.construct_xpath``; ``construct_xpath`` also had a
    duplicate-parameter SyntaxError.  Unbound ``snake_case_`` arguments are
    restored from context (``recursive=False`` and ``tensor_type=None`` match
    the obvious intent here -- NOTE(review): confirm against upstream).
    """

    def __init__(self, **kwargs):
        requires_backends(self, ['bs4'])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """Return parallel lists of tag names and 1-based sibling subscripts
        describing *element*'s position in the parse tree (0 = only child)."""
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child))
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """Parse one HTML string into (texts, per-text tag lists, per-text subscript lists)."""
        html_code = BeautifulSoup(html_string, 'html.parser')
        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []
        for element in html_code.descendants:
            if type(element) == bsa.element.NavigableString:
                if type(element.parent) != bsa.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag)
                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                stringaxtag_seq.append(xpath_tags)
                stringaxsubs_seq.append(xpath_subscripts)
        if len(all_doc_strings) != len(stringaxtag_seq):
            raise ValueError('Number of doc strings and xtags does not correspond')
        if len(all_doc_strings) != len(stringaxsubs_seq):
            raise ValueError('Number of doc strings and xsubs does not correspond')
        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """Assemble an XPath string like ``/html/body/div[2]`` from the lists."""
        xpath = ''
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings):
        """Extract nodes + xpaths for one HTML string or a batch of them."""
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                'HTML strings must of type `str`, `List[str]` (batch of examples), '
                f"""but is of type {type(html_strings)}.""")
        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, stringaxtag_seq, stringaxsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, stringaxtag_seq, stringaxsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)
        # return as Dict
        data = {'nodes': nodes, 'xpaths': xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
lowerCamelCase_ : Dict = get_logger(__name__)
lowerCamelCase_ : List[str] = r'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class _UpperCAmelCase :
    """Abstract base class for all logit processors applied during generation.

    Fixes: the mangled ``__call__`` repeated one parameter name twice (a
    SyntaxError) -- restored to ``(input_ids, scores)`` per the shared inputs
    docstring -- and the decorator argument was unbound; it now points at the
    module-level inputs-docstring constant.
    """

    @add_start_docstrings(lowerCamelCase_ )
    def __call__( self , input_ids , scores ):
        """Flax method for processing logits; must be overridden by subclasses."""
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class _UpperCAmelCase :
    """Abstract base class for all logit warpers applied during multinomial
    sampling.

    Fixes: same restoration as the processor base class above -- duplicate
    ``__call__`` parameters resolved to ``(input_ids, scores)`` and the
    unbound decorator argument pointed at the module-level docstring constant.
    """

    @add_start_docstrings(lowerCamelCase_ )
    def __call__( self , input_ids , scores ):
        """Flax method for warping logits; must be overridden by subclasses."""
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """A list of logits processors applied in order to ``(input_ids, scores)``.

    Fixes: the mangled ``__call__`` repeated one parameter name three times
    plus ``**kwargs`` (a SyntaxError); restored to
    ``(input_ids, scores, cur_len, **kwargs)`` per how the body forwards them.
    The unbound decorator argument now points at the module docstring constant.
    """

    @add_start_docstrings(lowerCamelCase_ )
    def __call__( self , input_ids , scores , cur_len , **kwargs ):
        for processor in self:
            function_args = inspect.signature(processor.__call__ ).parameters
            if len(function_args ) > 3:
                # Processors with extra parameters must receive them via kwargs.
                if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
                    raise ValueError(
                        F"""Make sure that all the required parameters: {list(function_args.keys() )} for """
                        F"""{processor.__class__} are passed to the logits processor.""" )
                scores = processor(input_ids , scores , cur_len , **kwargs )
            else:
                scores = processor(input_ids , scores , cur_len )
        return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """Warper that divides the logits by a temperature (sharper when < 1,
    flatter when > 1).

    Fixes: the mangled validity check compared ``isinstance(x, x)``, which can
    never describe the intended contract; restored to the strictly-positive
    float check the error message documents.  Duplicate ``__call__``
    parameters resolved to ``(input_ids, scores, cur_len)``.
    """

    def __init__( self , temperature ):
        if not isinstance(temperature , float ) or not (temperature > 0):
            raise ValueError(F"""`temperature` has to be a strictly positive float, but is {temperature}""" )
        self.temperature = temperature

    def __call__( self , input_ids , scores , cur_len ):
        scores = scores / self.temperature
        return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """Warper that keeps only the smallest set of tokens whose cumulative
    probability reaches ``top_p``; everything else gets ``filter_value``.

    Fixes: ``isinstance(x, x)`` checks restored to the float/int checks the
    error messages document; duplicate ``__call__`` parameters resolved to
    ``(input_ids, scores, cur_len)``; unbound intermediates restored from how
    the surviving lines consume them (top-k sort, cumulative softmax mask,
    scatter back via ``sort_key_val``).
    """

    def __init__( self , top_p , filter_value = -float('Inf' ) , min_tokens_to_keep = 1 ):
        if not isinstance(top_p , float ) or (top_p < 0 or top_p > 1.0):
            raise ValueError(F"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" )
        if not isinstance(min_tokens_to_keep , int ) or (min_tokens_to_keep < 1):
            raise ValueError(F"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" )
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__( self , input_ids , scores , cur_len ):
        # Sort all vocab entries descending, then mask by cumulative probability.
        topk_scores , topk_indices = lax.top_k(scores , scores.shape[-1] )
        mask_scores = jnp.full_like(scores , self.filter_value )
        cumulative_probs = jax.nn.softmax(topk_scores , axis=-1 ).cumsum(axis=-1 )
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask , 1 )
        score_mask |= score_mask.at[:, 0].set(True )
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True )
        topk_next_scores = jnp.where(score_mask , topk_scores , mask_scores )
        # Undo the sort: scatter the (possibly filtered) scores back to vocab order.
        next_scores = jax.lax.sort_key_val(topk_indices , topk_next_scores )[-1]
        return next_scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ = -float('Inf' ) , snake_case_ = 1 ):
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or top_k <= 0:
raise ValueError(F"""`top_k` has to be a strictly positive integer, but is {top_k}""" )
A_ : str = max(snake_case_ , snake_case_ )
A_ : Union[str, Any] = filter_value
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ , A_ : int = scores.shape
A_ : Tuple = jnp.full(batch_size * vocab_size , self.filter_value )
A_ : Union[str, Any] = min(self.top_k , scores.shape[-1] ) # Safety check
A_ , A_ : Dict = lax.top_k(snake_case_ , snake_case_ )
A_ : Optional[int] = jnp.broadcast_to((jnp.arange(snake_case_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
A_ : int = topk_scores.flatten()
A_ : Any = topk_indices.flatten() + shift
A_ : List[str] = next_scores_flat.at[topk_indices_flat].set(snake_case_ )
A_ : Union[str, Any] = next_scores_flat.reshape(snake_case_ , snake_case_ )
return next_scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ ):
"""simple docstring"""
A_ : Union[str, Any] = bos_token_id
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Optional[Any] = jnp.full(scores.shape , -float('inf' ) )
A_ : Union[str, Any] = 1 - jnp.bool_(cur_len - 1 )
A_ : str = jnp.where(snake_case_ , new_scores.at[:, self.bos_token_id].set(0 ) , snake_case_ )
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Dict = max_length
A_ : Optional[int] = eos_token_id
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Union[str, Any] = jnp.full(scores.shape , -float('inf' ) )
A_ : Dict = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A_ : Dict = jnp.where(snake_case_ , new_scores.at[:, self.eos_token_id].set(0 ) , snake_case_ )
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ ):
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or min_length < 0:
raise ValueError(F"""`min_length` has to be a positive integer, but is {min_length}""" )
if not isinstance(snake_case_ , snake_case_ ) or eos_token_id < 0:
raise ValueError(F"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" )
A_ : Any = min_length
A_ : List[Any] = eos_token_id
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : int = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
A_ : Optional[Any] = jnp.where(snake_case_ , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , snake_case_ )
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : List[Any] = list(snake_case_ )
A_ : Tuple = begin_index
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Dict = 1 - jnp.bool_(cur_len - self.begin_index )
A_ : int = jnp.where(snake_case_ , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , snake_case_ )
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ ):
"""simple docstring"""
A_ : List[Any] = list(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Optional[Any] = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ ):
"""simple docstring"""
A_ : Any = dict(snake_case_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
A_ : Tuple = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
A_ : Tuple = force_token_array.at[index].set(snake_case_ )
A_ : Any = jnp.intaa(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
def _force_token(snake_case_ ):
A_ : List[Any] = scores.shape[0]
A_ : Any = self.force_token_array[generation_idx]
A_ : Tuple = jnp.ones_like(snake_case_ , dtype=scores.dtype ) * -float('inf' )
A_ : List[Any] = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
A_ : int = lax.dynamic_update_slice(snake_case_ , snake_case_ , (0, current_token) )
return new_scores
A_ : int = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(snake_case_ ) , lambda: scores , ) , )
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Tuple = generate_config.eos_token_id
A_ : Optional[int] = generate_config.no_timestamps_token_id
A_ : List[str] = generate_config.no_timestamps_token_id + 1
A_ : Any = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(snake_case_ , 'max_initial_timestamp_index' ):
A_ : List[Any] = generate_config.max_initial_timestamp_index
else:
A_ : Any = model_config.vocab_size
if self.max_initial_timestamp_index is None:
A_ : Optional[Any] = model_config.vocab_size
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : List[str] = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(snake_case_ , snake_case_ ):
A_ : Any = jnp.where((cur_len - self.begin_index) >= 1 , snake_case_ , snake_case_ )
A_ : Tuple = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , snake_case_ , )
A_ : Tuple = jnp.where((cur_len - self.begin_index) < 2 , snake_case_ , snake_case_ )
A_ : Any = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , snake_case_ , snake_case_ , )
return jnp.where(
snake_case_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , snake_case_ , )
A_ : Tuple = jax.vmap(snake_case_ )(snake_case_ , snake_case_ )
A_ : Optional[Any] = jnp.where(cur_len == self.begin_index , snake_case_ , snake_case_ )
A_ : Tuple = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , snake_case_ , )
A_ : int = self.timestamp_begin + self.max_initial_timestamp_index
A_ : List[Any] = jnp.where(
snake_case_ , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , snake_case_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
A_ : Any = jax.nn.log_softmax(snake_case_ , axis=-1 )
def handle_cumulative_probs(snake_case_ , snake_case_ ):
A_ : Dict = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
A_ : Optional[Any] = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , snake_case_ , )
A_ : Union[str, Any] = jax.vmap(snake_case_ )(snake_case_ , snake_case_ )
return scores | 286 | 1 |
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCamelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase_ : Optional[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase_ : Optional[int] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
lowerCamelCase_ : Optional[Any] = {
'allenai/led-base-16384': 1_63_84,
}
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowercase_ : Union[str, Any] = VOCAB_FILES_NAMES
lowercase_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Any = LEDTokenizer
lowercase_ : List[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , snake_case_=True , **snake_case_ , ):
"""simple docstring"""
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , trim_offsets=snake_case_ , **snake_case_ , )
A_ : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case_ ) != add_prefix_space:
A_ : Any = getattr(snake_case_ , pre_tok_state.pop('type' ) )
A_ : Optional[int] = add_prefix_space
A_ : Dict = pre_tok_class(**snake_case_ )
A_ : List[str] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A_ : Optional[Any] = 'post_processor'
A_ : Tuple = getattr(self.backend_tokenizer , snake_case_ , snake_case_ )
if tokenizer_component_instance:
A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A_ : int = tuple(state['sep'] )
if "cls" in state:
A_ : Any = tuple(state['cls'] )
A_ : Optional[Any] = False
if state.get('add_prefix_space' , snake_case_ ) != add_prefix_space:
A_ : Dict = add_prefix_space
A_ : Optional[int] = True
if state.get('trim_offsets' , snake_case_ ) != trim_offsets:
A_ : Optional[Any] = trim_offsets
A_ : List[Any] = True
if changes_to_apply:
A_ : Tuple = getattr(snake_case_ , state.pop('type' ) )
A_ : Tuple = component_class(**snake_case_ )
setattr(self.backend_tokenizer , snake_case_ , snake_case_ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCamelCase_ ( self ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : str = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else value
A_ : Optional[Any] = value
def lowerCamelCase_ ( self , *snake_case_ , **snake_case_ ):
"""simple docstring"""
A_ : Any = kwargs.get('is_split_into_words' , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
def lowerCamelCase_ ( self , *snake_case_ , **snake_case_ ):
"""simple docstring"""
A_ : List[Any] = kwargs.get('is_split_into_words' , snake_case_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*snake_case_ , **snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ = None ):
"""simple docstring"""
A_ : Union[str, Any] = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_=None ):
"""simple docstring"""
A_ : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self , snake_case_ , snake_case_ = None ):
"""simple docstring"""
A_ : List[Any] = [self.sep_token_id]
A_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self , snake_case_ , snake_case_ = None , snake_case_ = PaddingStrategy.DO_NOT_PAD , snake_case_ = None , snake_case_ = None , ):
"""simple docstring"""
A_ : Dict = super()._pad(
encoded_inputs=snake_case_ , max_length=snake_case_ , padding_strategy=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , )
# Load from model defaults
if return_attention_mask is None:
A_ : int = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
A_ : Any = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
A_ : Any = len(encoded_inputs['global_attention_mask'] ) != len(snake_case_ )
if needs_to_be_padded:
A_ : str = len(snake_case_ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
A_ : str = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
A_ : Optional[int] = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs | 286 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : Optional[Any] = R'\w+[.]\d+'
A_ : int = re.findall(_UpperCAmelCase , _UpperCAmelCase )
for pat in pats:
A_ : Optional[int] = key.replace(_UpperCAmelCase , '_'.join(pat.split('.' ) ) )
return key
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : List[Any] = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
A_ : Union[str, Any] = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
A_ : List[str] = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
A_ : Optional[Any] = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
A_ : int = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
A_ : str = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
A_ : Optional[Any] = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
A_ : Optional[Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
A_ : Tuple = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
A_ : Optional[int] = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=42 ):
"""simple docstring"""
A_ : int = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
A_ : Union[str, Any] = flax_model.init_weights(PRNGKey(_UpperCAmelCase ) )
A_ : Optional[Any] = flatten_dict(_UpperCAmelCase )
A_ : Tuple = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A_ : Any = rename_key(_UpperCAmelCase )
A_ : List[str] = tuple(renamed_pt_key.split('.' ) )
# Correctly rename weight parameters
A_ , A_ : Union[str, Any] = rename_key_and_reshape_tensor(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
A_ : str = jnp.asarray(_UpperCAmelCase )
return unflatten_dict(_UpperCAmelCase ) | 286 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase_ : List[str] = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwinForImageClassification',
'SwinForMaskedImageModeling',
'SwinModel',
'SwinPreTrainedModel',
'SwinBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : int = [
'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSwinForImageClassification',
'TFSwinForMaskedImageModeling',
'TFSwinModel',
'TFSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 286 |
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowercase_ : List[str] = CustomTokenizer
pass | 286 | 1 |
"""simple docstring"""
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
if curr_ind == len(_UpperCAmelCase ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(_UpperCAmelCase ) ):
if valid_connection(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# Insert current vertex into path as next transition
A_ : Tuple = next_ver
# Validate created path
if util_hamilton_cycle(_UpperCAmelCase , _UpperCAmelCase , curr_ind + 1 ):
return True
# Backtrack
A_ : Any = -1
return False
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase = 0 ):
"""simple docstring"""
A_ : Optional[int] = [-1] * (len(_UpperCAmelCase ) + 1)
# initialize start and end of path with starting index
A_ : List[str] = start_index
# evaluate and if we find answer return path either return empty array
return path if util_hamilton_cycle(_UpperCAmelCase , _UpperCAmelCase , 1 ) else [] | 286 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowerCamelCase_ : str = logging.get_logger(__name__)
@add_end_docstrings(
UpperCAmelCase__ , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
if self.framework == "tf":
A_ : str = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
A_ : List[str] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case_ )
else:
raise ValueError('Unsupported framework' )
return masked_index
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : List[str] = self.get_masked_index(snake_case_ )
A_ : str = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
if isinstance(snake_case_ , snake_case_ ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['input_ids'][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_=None , **snake_case_ ):
"""simple docstring"""
if return_tensors is None:
A_ : Any = self.framework
A_ : Dict = self.tokenizer(snake_case_ , return_tensors=snake_case_ )
self.ensure_exactly_one_mask_token(snake_case_ )
return model_inputs
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Dict = self.model(**snake_case_ )
A_ : Optional[int] = model_inputs['input_ids']
return model_outputs
def lowerCamelCase_ ( self , snake_case_ , snake_case_=5 , snake_case_=None ):
"""simple docstring"""
if target_ids is not None and target_ids.shape[0] < top_k:
A_ : str = target_ids.shape[0]
A_ : Optional[Any] = model_outputs['input_ids'][0]
A_ : List[Any] = model_outputs['logits']
if self.framework == "tf":
A_ : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
A_ : Union[str, Any] = outputs.numpy()
A_ : Optional[int] = outputs[0, masked_index, :]
A_ : Optional[Any] = stable_softmax(snake_case_ , axis=-1 )
if target_ids is not None:
A_ : Union[str, Any] = tf.gather_nd(tf.squeeze(snake_case_ , 0 ) , target_ids.reshape(-1 , 1 ) )
A_ : Optional[int] = tf.expand_dims(snake_case_ , 0 )
A_ : Any = tf.math.top_k(snake_case_ , k=snake_case_ )
A_ , A_ : str = topk.values.numpy(), topk.indices.numpy()
else:
A_ : int = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case_ ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
A_ : Tuple = outputs[0, masked_index, :]
A_ : List[str] = logits.softmax(dim=-1 )
if target_ids is not None:
A_ : str = probs[..., target_ids]
A_ , A_ : List[str] = probs.topk(snake_case_ )
A_ : List[Any] = []
A_ : int = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
A_ : str = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
A_ : Union[str, Any] = input_ids.numpy().copy()
if target_ids is not None:
A_ : str = target_ids[p].tolist()
A_ : Union[str, Any] = p
# Filter padding out:
A_ : Any = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
A_ : Any = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
A_ : Any = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence}
row.append(snake_case_ )
result.append(snake_case_ )
if single_mask:
return result[0]
return result
def lowerCamelCase_ ( self , snake_case_ , snake_case_=None ):
"""simple docstring"""
if isinstance(snake_case_ , snake_case_ ):
A_ : List[str] = [targets]
try:
A_ : Optional[int] = self.tokenizer.get_vocab()
except Exception:
A_ : int = {}
A_ : Tuple = []
for target in targets:
A_ : int = vocab.get(snake_case_ , snake_case_ )
if id_ is None:
A_ : Tuple = self.tokenizer(
snake_case_ , add_special_tokens=snake_case_ , return_attention_mask=snake_case_ , return_token_type_ids=snake_case_ , max_length=1 , truncation=snake_case_ , )['input_ids']
if len(snake_case_ ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'We cannot replace it with anything meaningful, ignoring it' )
continue
A_ : str = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
A_ : Tuple = list(set(snake_case_ ) )
if len(snake_case_ ) == 0:
raise ValueError('At least one target must be provided when passed.' )
A_ : Optional[Any] = np.array(snake_case_ )
return target_ids
def lowerCamelCase_ ( self , snake_case_=None , snake_case_=None ):
"""simple docstring"""
A_ : List[str] = {}
if targets is not None:
A_ : Any = self.get_target_ids(snake_case_ , snake_case_ )
A_ : Optional[Any] = target_ids
if top_k is not None:
A_ : int = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' )
return {}, {}, postprocess_params
def __call__( self , snake_case_ , *snake_case_ , **snake_case_ ):
"""simple docstring"""
A_ : List[str] = super().__call__(snake_case_ , **snake_case_ )
if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) == 1:
return outputs[0]
return outputs | 286 | 1 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : Optional[Any] = R'\w+[.]\d+'
A_ : int = re.findall(_UpperCAmelCase , _UpperCAmelCase )
for pat in pats:
A_ : Optional[int] = key.replace(_UpperCAmelCase , '_'.join(pat.split('.' ) ) )
return key
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : List[Any] = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
A_ : Union[str, Any] = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
A_ : List[str] = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
A_ : Optional[Any] = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
A_ : int = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
A_ : str = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
A_ : Optional[Any] = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
A_ : Optional[Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
A_ : Tuple = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
A_ : Optional[int] = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=42 ):
"""simple docstring"""
A_ : int = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
A_ : Union[str, Any] = flax_model.init_weights(PRNGKey(_UpperCAmelCase ) )
A_ : Optional[Any] = flatten_dict(_UpperCAmelCase )
A_ : Tuple = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A_ : Any = rename_key(_UpperCAmelCase )
A_ : List[str] = tuple(renamed_pt_key.split('.' ) )
# Correctly rename weight parameters
A_ , A_ : Union[str, Any] = rename_key_and_reshape_tensor(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
A_ : str = jnp.asarray(_UpperCAmelCase )
return unflatten_dict(_UpperCAmelCase ) | 286 |
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class _UpperCAmelCase ( CLIPTokenizer ):
    """CLIP tokenizer that expands one placeholder token into several learned tokens.

    `token_map` maps each registered placeholder string to the list of concrete
    tokens it is replaced by before tokenization.
    """

    def __init__( self , *args , **kwargs ):
        """Initialize the underlying CLIPTokenizer and an empty placeholder map."""
        super().__init__(*args , **kwargs )
        self.token_map = {}

    def try_adding_tokens( self , placeholder_token , *args , **kwargs ):
        """Add `placeholder_token` to the vocab; fail loudly if it already exists."""
        num_added_tokens = super().add_tokens(placeholder_token , *args , **kwargs )
        if num_added_tokens == 0:
            raise ValueError(
                f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
                ' `placeholder_token` that is not already in the tokenizer.' )

    def add_placeholder_tokens( self , placeholder_token , *args , num_vec_per_token=1 , **kwargs ):
        """Register `placeholder_token`, backed by `num_vec_per_token` vocab tokens.

        With more than one vector the concrete tokens are `<token>_0`, `<token>_1`, ...
        """
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token , *args , **kwargs )
            output.append(placeholder_token )
        else:
            for i in range(num_vec_per_token ):
                ith_token = placeholder_token + f"""_{i}"""
                self.try_adding_tokens(ith_token , *args , **kwargs )
                output.append(ith_token )
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"""The tokenizer already has placeholder token {token} that can get confused with"""
                    f""" {placeholder_token}keep placeholder tokens independent""" )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text( self , text , vector_shuffle=False , prop_tokens_to_load=1.0 ):
        """Expand every registered placeholder occurring in `text` (or list of texts).

        NOTE(review): for list inputs only `vector_shuffle` is forwarded to the
        recursive call — `prop_tokens_to_load` falls back to its default; this
        mirrors the original control flow.
        """
        if isinstance(text , list ):
            output = []
            for i in range(len(text ) ):
                output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=vector_shuffle ) )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                # optionally load only a leading fraction of the learned vectors
                tokens = tokens[: 1 + int(len(tokens ) * prop_tokens_to_load )]
                if vector_shuffle:
                    tokens = copy.copy(tokens )
                    random.shuffle(tokens )
                text = text.replace(placeholder_token , ' '.join(tokens ) )
        return text

    def __call__( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        """Tokenize `text` after expanding placeholder tokens."""
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )

    def encode( self , text , *args , vector_shuffle=False , prop_tokens_to_load=1.0 , **kwargs ):
        """Encode `text` to ids after expanding placeholder tokens."""
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text , vector_shuffle=vector_shuffle , prop_tokens_to_load=prop_tokens_to_load ) , *args , **kwargs , )
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
lowerCamelCase_ : List[Any] = datasets.logging.get_logger(__name__)
lowerCamelCase_ : List[str] = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n'
lowerCamelCase_ : List[str] = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'
lowerCamelCase_ : str = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
    """COMET machine-translation metric backed by the `unbabel-comet` package.

    The `datasets.Metric` base class looks up `_info`, `_download_and_prepare`
    and `_compute` by name, so those API names are restored here.
    """

    def _info( self ):
        """Describe the metric's inputs, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='https://unbabel.github.io/COMET/html/index.html' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'sources': datasets.Value('string' , id='sequence' ),
                    'predictions': datasets.Value('string' , id='sequence' ),
                    'references': datasets.Value('string' , id='sequence' ),
                } ) , codebase_urls=['https://github.com/Unbabel/COMET'] , reference_urls=[
                'https://github.com/Unbabel/COMET',
                'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
                'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
            ] , )

    def _download_and_prepare( self , dl_manager ):
        """Download the requested COMET checkpoint and keep the scorer on `self`."""
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )

    def _compute( self , sources , predictions , references , gpus=None , progress_bar=False ):
        """Score `predictions` against `references` given `sources`.

        Returns a dict with the corpus-level `mean_score` and per-segment `scores`.
        """
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'src': sources, 'mt': predictions, 'ref': references}
        # transpose column dict -> list of per-segment row dicts
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        # NOTE(review): unbabel-comet 1.x predict() returns (segment_scores,
        # system_score) in that order — confirm against the pinned comet version.
        scores, mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
"""simple docstring"""
def UpperCAmelCase__ ( hex_num ):
    """Convert a hexadecimal string into an int whose decimal digits spell its binary form.

    Example: "AC" (= 172) -> 10101100.  A leading '-' is preserved on the result.

    Raises:
        ValueError: if the input is empty/whitespace or not valid hexadecimal.
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError('No value was passed to the function' )
    is_negative = hex_num[0] == '-'
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num , 16 )
    except ValueError:
        raise ValueError('Invalid value was passed to the function' )
    # Edge case: zero would otherwise leave bin_str empty and crash in int('').
    if int_num == 0:
        return 0
    bin_str = ''
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(('-' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod() | 286 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class _UpperCAmelCase ( TaskTemplate ):
    """Task template describing the audio-classification task for `datasets`.

    `TaskTemplate` consumers read `task`, `input_schema`, `label_schema`,
    `align_with_features` and `column_mapping` by name, so those names are
    restored here (the scrambled file had them all collapsed to `lowercase_`).
    """

    # serialized even when it equals the default, so consumers always see the task name
    task: str = field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"audio": Audio()} )
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel} )
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features( self , features ):
        """Return a copy of this template whose label schema uses the dataset's ClassLabel.

        Raises:
            ValueError: if `label_column` is missing or is not a ClassLabel feature.
        """
        if self.label_column not in features:
            raise ValueError(f"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # frozen dataclass: bypass __setattr__ via __dict__ on the copy
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping( self ):
        """Map dataset column names to the canonical 'audio'/'labels' names."""
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
"""simple docstring"""
import qiskit
def UpperCAmelCase__ ( bita , bitb ):
    """Simulate a quantum half adder for the two input bits.

    Qubits 0/1 encode the inputs, qubit 2 receives the XOR (sum), qubit 3 the
    AND (carry).  Returns the measurement histogram, e.g. {'10': 1000}.
    """
    # Original signature repeated one parameter name (a SyntaxError); the two
    # inputs are restored as distinct names.
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0 )
    if bitb == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2 , 0 )  # extract XOR value
    qc_ha.measure(3 , 1 )  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )


if __name__ == "__main__":
    counts = UpperCAmelCase__(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def UpperCAmelCase__ ( state_dict ):
    """Strip fairseq bookkeeping entries from `state_dict` in place.

    Missing keys are ignored (pop with a default), so partially-cleaned
    dicts are fine.  Returns None.
    """
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
        'decoder.output_projection.weight',
    ]
    # The scrambled version popped the dict object itself; pop each key instead.
    for k in ignore_keys:
        state_dict.pop(k , None )
def UpperCAmelCase__ ( emb ):
    """Build a bias-free nn.Linear whose weights are tied to embedding `emb`.

    Used to create an LM head that shares parameters with the token embedding.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    # tie: share the embedding's storage rather than copying it
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def UpperCAmelCase__ ( checkpoint_path , hf_config_path="facebook/mbart-large-en-ro" , finetuned=False , mbart_aa=False ):
    """Load a fairseq mBART checkpoint from disk and convert it to HF MBart.

    Args:
        checkpoint_path: path to the fairseq `model.pt` file.
        hf_config_path: HF config to instantiate (vocab size is overridden).
        finetuned: whether the checkpoint is fine-tuned (ties the LM head).
        mbart_aa: whether this is an mBART-50 style checkpoint.

    Returns:
        An `MBartForConditionalGeneration` with the converted weights.
    """
    # Original signature repeated `_UpperCAmelCase` (a SyntaxError); names are
    # reconstructed from the call sites in the CLI block below.
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path , vocab_size=vocab_size )
    if mbart_aa and finetuned:
        # mBART-50 fine-tuned checkpoints use relu activations
        mbart_config.activation_function = 'relu'
    # shared embedding comes from the decoder side of the fairseq checkpoint
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
lowerCamelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
lowerCamelCase_ : Any = parser.parse_args()
lowerCamelCase_ : Tuple = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path) | 286 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : Any = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration for X-MOD (modular multilingual transformer) models.

    Mirrors the RoBERTa configuration plus the X-MOD adapter options.  The
    scrambled file collapsed every __init__ parameter to one name (a
    SyntaxError); names are reconstructed from the attribute assignments.
    """

    # registry key read by the AutoConfig machinery
    model_type = """xmod"""

    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , pre_norm=False , adapter_reduction_factor=2 , adapter_layer_norm=False , adapter_reuse_layer_norm=True , ln_before_adapter=True , languages=("en_XX",) , default_language=None , **kwargs , ):
        """Store the standard transformer hyper-parameters plus adapter settings."""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # X-MOD specific adapter configuration
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages )
        self.default_language = default_language
class _UpperCAmelCase ( OnnxConfig ):
    """ONNX export configuration for X-MOD models.

    `OnnxConfig` consumers read the `inputs` property by name, so that name is
    restored (the scrambled file renamed it to `lowerCamelCase_` and dropped
    the `dynamic_axis` binding, which made the body a NameError).
    """

    @property
    def inputs( self ):
        """Return the ONNX input axes: batch/sequence (plus choice for multiple-choice)."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCAmelCase__ ( dataset , expected_features ):
    """Shared assertions for a Dataset read from the 4-row / 3-column JSON fixture.

    Args:
        dataset: the object returned by JsonDatasetReader.read().
        expected_features: mapping of column name -> expected dtype string.
    """
    # Original had duplicate parameter names and checked isinstance(x, x);
    # the intended check is that the reader produced a Dataset.
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : Union[str, Any] = tmp_path / 'cache'
A_ : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : int = JsonDatasetReader(_UpperCAmelCase , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase ).read()
_check_json_dataset(_UpperCAmelCase , _UpperCAmelCase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : Union[str, Any] = tmp_path / 'cache'
A_ : Union[str, Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A_ : int = features.copy() if features else default_expected_features
A_ : List[Any] = (
Features({feature: Value(_UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : str = JsonDatasetReader(_UpperCAmelCase , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
_check_json_dataset(_UpperCAmelCase , _UpperCAmelCase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : Dict = tmp_path / 'cache'
A_ : Tuple = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
A_ : List[Any] = features.copy() if features else default_expected_features
A_ : Dict = (
Features({feature: Value(_UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : List[str] = JsonDatasetReader(_UpperCAmelCase , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : int = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
A_ : Tuple = features.copy()
A_ : Tuple = (
Features({feature: Value(_UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : Optional[int] = tmp_path / 'cache'
A_ : int = JsonDatasetReader(_UpperCAmelCase , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : Union[str, Any] = tmp_path / 'cache'
A_ : int = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A_ : Optional[Any] = JsonDatasetReader(_UpperCAmelCase , cache_dir=_UpperCAmelCase , split=_UpperCAmelCase ).read()
_check_json_dataset(_UpperCAmelCase , _UpperCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
if issubclass(_UpperCAmelCase , _UpperCAmelCase ):
A_ : int = jsonl_path
elif issubclass(_UpperCAmelCase , _UpperCAmelCase ):
A_ : Any = [jsonl_path]
A_ : List[Any] = tmp_path / 'cache'
A_ : int = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A_ : Union[str, Any] = JsonDatasetReader(_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
_check_json_dataset(_UpperCAmelCase , _UpperCAmelCase )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=("train",) ):
"""simple docstring"""
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
for split in splits:
A_ : Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : Tuple = tmp_path / 'cache'
A_ : str = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Optional[Any] = JsonDatasetReader({'train': jsonl_path} , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase ).read()
_check_json_datasetdict(_UpperCAmelCase , _UpperCAmelCase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : List[Any] = tmp_path / 'cache'
A_ : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A_ : List[str] = features.copy() if features else default_expected_features
A_ : List[str] = (
Features({feature: Value(_UpperCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : Tuple = JsonDatasetReader({'train': jsonl_path} , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
_check_json_datasetdict(_UpperCAmelCase , _UpperCAmelCase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
if split:
A_ : Dict = {split: jsonl_path}
else:
A_ : List[str] = 'train'
A_ : int = {'train': jsonl_path, 'test': jsonl_path}
A_ : Any = tmp_path / 'cache'
A_ : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A_ : List[Any] = JsonDatasetReader(_UpperCAmelCase , cache_dir=_UpperCAmelCase ).read()
_check_json_datasetdict(_UpperCAmelCase , _UpperCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Deserialize one JSON document from the open file-like object and return it."""
    raw_text = _UpperCAmelCase.read()
    return json.loads(raw_text )
def UpperCAmelCase__ ( buffer ):
    """Parse JSON Lines: one JSON document per line of the open file-like `buffer`.

    Returns the list of decoded documents (empty list for an empty buffer).
    """
    # Bug fix: each *line* must be decoded; the original called json.loads on
    # the buffer object itself (and the parameter name never bound `buffer`).
    return [json.loads(line ) for line in buffer]
class _UpperCAmelCase :
'''simple docstring'''
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case_ , snake_case_ , lines=snake_case_ ).write()
buffer.seek(0 )
A_ : Union[str, Any] = load_json_function(snake_case_ )
assert isinstance(snake_case_ , snake_case_ )
assert isinstance(exported_content[0] , snake_case_ )
assert len(snake_case_ ) == 1_0
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case_ , snake_case_ , lines=snake_case_ , orient=snake_case_ ).write()
buffer.seek(0 )
A_ : str = load_json(snake_case_ )
assert isinstance(snake_case_ , snake_case_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(snake_case_ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(snake_case_ ) == 1_0
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case_ , snake_case_ , lines=snake_case_ , num_proc=2 ).write()
buffer.seek(0 )
A_ : List[Any] = load_json_function(snake_case_ )
assert isinstance(snake_case_ , snake_case_ )
assert isinstance(exported_content[0] , snake_case_ )
assert len(snake_case_ ) == 1_0
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case_ , snake_case_ , lines=snake_case_ , orient=snake_case_ , num_proc=2 ).write()
buffer.seek(0 )
A_ : Optional[Any] = load_json(snake_case_ )
assert isinstance(snake_case_ , snake_case_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(snake_case_ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(snake_case_ ) == 1_0
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
with pytest.raises(snake_case_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case_ , snake_case_ , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Any = tmp_path_factory.mktemp('data' ) / F"""test.json.{extension}"""
A_ : Any = str(shared_datadir / F"""test_file.json.{extension}""" )
JsonDatasetWriter(snake_case_ , snake_case_ , compression=snake_case_ ).write()
with fsspec.open(snake_case_ , 'rb' , compression='infer' ) as f:
A_ : Optional[int] = f.read()
with fsspec.open(snake_case_ , 'rb' , compression='infer' ) as f:
A_ : str = f.read()
assert exported_content == original_content | 286 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( ProcessorMixin ):
    """Processor bundling a ViLT image processor with a BERT tokenizer.

    `ProcessorMixin` reads `attributes`, `image_processor_class` and
    `tokenizer_class` by name, so those names are restored here.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = """ViltImageProcessor"""
    tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """Wire up the two sub-processors, honoring the deprecated `feature_extractor` kwarg."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self , images , text=None , add_special_tokens=True , padding=False , truncation=None , max_length=None , stride=0 , pad_to_multiple_of=None , return_token_type_ids=None , return_attention_mask=None , return_overflowing_tokens=False , return_special_tokens_mask=False , return_offsets_mapping=False , return_length=False , verbose=True , return_tensors=None , **kwargs , ):
        """Tokenize `text` and process `images`, merging both into one encoding."""
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        encoding.update(encoding_image_processor )
        return encoding

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """Union of tokenizer and image-processor input names, order-preserving."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCamelCase_ : Optional[int] = 16
lowerCamelCase_ : Dict = 32
def UpperCAmelCase__ ( accelerator , batch_size = 16 ):
    """Build train/eval dataloaders for GLUE MRPC with a BERT tokenizer.

    Args:
        accelerator: the `Accelerator`, used for process ordering and padding rules.
        batch_size: per-device batch size for both dataloaders.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    datasets = load_dataset('glue' , 'mrpc' )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )

    # Instantiate dataloaders.  NOTE(review): the upstream example uses a fixed
    # EVAL_BATCH_SIZE constant for eval; this dump passes the same parameter to
    # both loaders, which is preserved here.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCamelCase_ : str = mocked_dataloaders # noqa: F811
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
if os.environ.get('TESTING_MOCKED_DATALOADERS' , _UpperCAmelCase ) == "1":
A_ : List[Any] = 2
# Initialize accelerator
A_ : List[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A_ : List[str] = config['lr']
A_ : List[str] = int(config['num_epochs'] )
A_ : Optional[int] = int(config['seed'] )
A_ : Optional[Any] = int(config['batch_size'] )
A_ : Optional[int] = evaluate.load('glue' , 'mrpc' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=_UpperCAmelCase )
def inner_training_loop(_UpperCAmelCase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(_UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A_ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A_ : Dict = model.to(accelerator.device )
# Instantiate optimizer
A_ : Any = AdamW(params=model.parameters() , lr=_UpperCAmelCase )
A_ , A_ : Tuple = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase )
# Instantiate scheduler
A_ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A_ , A_ , A_ , A_ , A_ : Tuple = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase ):
model.train()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
A_ : Optional[int] = model(**_UpperCAmelCase )
A_ : Tuple = outputs.loss
accelerator.backward(_UpperCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A_ : str = model(**_UpperCAmelCase )
A_ : int = outputs.logits.argmax(dim=-1 )
A_ , A_ : List[str] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=_UpperCAmelCase , references=_UpperCAmelCase , )
A_ : Union[str, Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , _UpperCAmelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def UpperCAmelCase__ ( ):
"""simple docstring"""
A_ : Tuple = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
A_ : str = parser.parse_args()
A_ : Tuple = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main() | 286 |
"""simple docstring"""
from copy import deepcopy
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , snake_case_ = None , snake_case_ = None ):
"""simple docstring"""
if arr is None and size is not None:
A_ : Union[str, Any] = size
A_ : List[str] = [0] * size
elif arr is not None:
self.init(snake_case_ )
else:
raise ValueError('Either arr or size must be specified' )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Union[str, Any] = len(snake_case_ )
A_ : Optional[int] = deepcopy(snake_case_ )
for i in range(1 , self.size ):
A_ : Optional[Any] = self.next_(snake_case_ )
if j < self.size:
self.tree[j] += self.tree[i]
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : int = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
A_ : Optional[int] = self.next_(snake_case_ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def lowerCamelCase_ ( snake_case_ ):
"""simple docstring"""
return index + (index & (-index))
@staticmethod
def lowerCamelCase_ ( snake_case_ ):
"""simple docstring"""
return index - (index & (-index))
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
A_ : List[str] = self.next_(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
self.add(snake_case_ , value - self.get(snake_case_ ) )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
if right == 0:
return 0
A_ : Any = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
A_ : Tuple = self.prev(snake_case_ )
return result
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
return self.prefix(snake_case_ ) - self.prefix(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
return self.query(snake_case_ , index + 1 )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
value -= self.tree[0]
if value < 0:
return -1
A_ : List[Any] = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
A_ : Tuple = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod() | 286 | 1 |
"""simple docstring"""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
lowerCamelCase_ : List[str] = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ : Optional[Any] = logging.getLogger()
def UpperCAmelCase__ ( ):
"""simple docstring"""
A_ : Dict = argparse.ArgumentParser()
parser.add_argument('-f' )
A_ : Optional[Any] = parser.parse_args()
return args.f
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase="eval" ):
"""simple docstring"""
A_ : List[Any] = os.path.join(_UpperCAmelCase , f"""{split}_results.json""" )
if os.path.exists(_UpperCAmelCase ):
with open(_UpperCAmelCase , 'r' ) as f:
return json.load(_UpperCAmelCase )
raise ValueError(f"""can't find {path}""" )
lowerCamelCase_ : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Any = self.get_auto_remove_tmp_dir()
A_ : int = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(snake_case_ , 'argv' , snake_case_ ):
run_flax_glue.main()
A_ : Optional[int] = get_results(snake_case_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Optional[int] = self.get_auto_remove_tmp_dir()
A_ : List[str] = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(snake_case_ , 'argv' , snake_case_ ):
run_clm_flax.main()
A_ : Tuple = get_results(snake_case_ )
self.assertLess(result['eval_perplexity'] , 1_0_0 )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : str = self.get_auto_remove_tmp_dir()
A_ : Optional[Any] = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(snake_case_ , 'argv' , snake_case_ ):
run_summarization_flax.main()
A_ : str = get_results(snake_case_ , split='test' )
self.assertGreaterEqual(result['test_rouge1'] , 1_0 )
self.assertGreaterEqual(result['test_rouge2'] , 2 )
self.assertGreaterEqual(result['test_rougeL'] , 7 )
self.assertGreaterEqual(result['test_rougeLsum'] , 7 )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = self.get_auto_remove_tmp_dir()
A_ : Optional[Any] = F"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(snake_case_ , 'argv' , snake_case_ ):
run_mlm_flax.main()
A_ : int = get_results(snake_case_ )
self.assertLess(result['eval_perplexity'] , 4_2 )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Any = self.get_auto_remove_tmp_dir()
A_ : List[str] = F"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(snake_case_ , 'argv' , snake_case_ ):
run_ta_mlm_flax.main()
A_ : Tuple = get_results(snake_case_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.42 )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Dict = 7 if get_gpu_count() > 1 else 2
A_ : int = self.get_auto_remove_tmp_dir()
A_ : List[Any] = F"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(snake_case_ , 'argv' , snake_case_ ):
run_flax_ner.main()
A_ : int = get_results(snake_case_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertGreaterEqual(result['eval_f1'] , 0.3 )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Dict = self.get_auto_remove_tmp_dir()
A_ : Dict = F"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(snake_case_ , 'argv' , snake_case_ ):
run_qa.main()
A_ : Any = get_results(snake_case_ )
self.assertGreaterEqual(result['eval_f1'] , 3_0 )
self.assertGreaterEqual(result['eval_exact'] , 3_0 ) | 286 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , snake_case_ = 7_6_8 , ):
"""simple docstring"""
super().__init__()
A_ : Optional[int] = nn.Parameter(torch.zeros(1 , snake_case_ ) )
A_ : Optional[int] = nn.Parameter(torch.ones(1 , snake_case_ ) )
def lowerCamelCase_ ( self , snake_case_ = None , snake_case_ = None , ):
"""simple docstring"""
A_ : str = nn.Parameter(self.mean.to(snake_case_ ).to(snake_case_ ) )
A_ : Optional[int] = nn.Parameter(self.std.to(snake_case_ ).to(snake_case_ ) )
return self
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Tuple = (embeds - self.mean) * 1.0 / self.std
return embeds
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : List[str] = (embeds * self.std) + self.mean
return embeds | 286 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase_ : Tuple = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : str = ['OwlViTFeatureExtractor']
lowerCamelCase_ : Union[str, Any] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 286 |
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
lowerCamelCase_ : Any = HfArgumentParser(InitializationArguments)
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
lowerCamelCase_ : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
lowerCamelCase_ : Tuple = {
'vocab_size': len(tokenizer),
'scale_attn_by_inverse_layer_idx': True,
'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
lowerCamelCase_ : int = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
lowerCamelCase_ : Any = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub) | 286 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=1_8 , snake_case_=3_0 , snake_case_=4_0_0 , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=False , snake_case_=True , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
A_ : Union[str, Any] = parent
A_ : Tuple = batch_size
A_ : List[Any] = num_channels
A_ : Union[str, Any] = image_size
A_ : Union[str, Any] = min_resolution
A_ : Optional[Any] = max_resolution
A_ : Optional[int] = do_resize
A_ : Optional[int] = size if size is not None else {'height': 1_8, 'width': 2_0}
A_ : Optional[Any] = do_thumbnail
A_ : Optional[Any] = do_align_axis
A_ : int = do_pad
A_ : Tuple = do_normalize
A_ : int = image_mean
A_ : Optional[int] = image_std
def lowerCamelCase_ ( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowercase_ : List[Any] = DonutImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = DonutImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , 'do_resize' ) )
self.assertTrue(hasattr(snake_case_ , 'size' ) )
self.assertTrue(hasattr(snake_case_ , 'do_thumbnail' ) )
self.assertTrue(hasattr(snake_case_ , 'do_align_long_axis' ) )
self.assertTrue(hasattr(snake_case_ , 'do_pad' ) )
self.assertTrue(hasattr(snake_case_ , 'do_normalize' ) )
self.assertTrue(hasattr(snake_case_ , 'image_mean' ) )
self.assertTrue(hasattr(snake_case_ , 'image_std' ) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 1_8, 'width': 2_0} )
A_ : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {'height': 4_2, 'width': 4_2} )
# Previous config had dimensions in (width, height) order
A_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=(4_2, 8_4) )
self.assertEqual(image_processor.size , {'height': 8_4, 'width': 4_2} )
def lowerCamelCase_ ( self ):
"""simple docstring"""
pass
@is_flaky()
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
A_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A_ : Optional[Any] = image_processing(snake_case_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
A_ : str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A_ : Union[str, Any] = image_processing(snake_case_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
A_ : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
A_ : Union[str, Any] = image_processing(snake_case_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , ) | 286 |
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
lowerCamelCase_ : Any = re.compile(r'\s+')
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(_UpperCAmelCase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : List[str] = [len(_UpperCAmelCase ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(_UpperCAmelCase ), "line_max": max(_UpperCAmelCase )}
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : Any = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase=5 ):
"""simple docstring"""
A_ : Optional[int] = ['auto-generated', 'autogenerated', 'automatically generated']
A_ : List[str] = example['content'].splitlines()
for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase=5 , _UpperCAmelCase=0.05 ):
"""simple docstring"""
A_ : Any = ['unit tests', 'test file', 'configuration file']
A_ : Dict = example['content'].splitlines()
A_ : List[Any] = 0
A_ : str = 0
# first test
for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
A_ : Tuple = example['content'].count('\n' )
A_ : Tuple = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : List[Any] = ['def ', 'class ', 'for ', 'while ']
A_ : Tuple = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase=4 ):
"""simple docstring"""
A_ : Union[str, Any] = example['content'].splitlines()
A_ : Any = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : Optional[Any] = tokenizer(example['content'] , truncation=_UpperCAmelCase )['input_ids']
A_ : Dict = len(example['content'] ) / len(_UpperCAmelCase )
return {"ratio": ratio}
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : Any = {}
results.update(get_hash(_UpperCAmelCase ) )
results.update(line_stats(_UpperCAmelCase ) )
results.update(alpha_stats(_UpperCAmelCase ) )
results.update(char_token_ratio(_UpperCAmelCase ) )
results.update(is_autogenerated(_UpperCAmelCase ) )
results.update(is_config_or_test(_UpperCAmelCase ) )
results.update(has_no_keywords(_UpperCAmelCase ) )
results.update(has_few_assignments(_UpperCAmelCase ) )
return results
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
if not check_uniques(_UpperCAmelCase , _UpperCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
with open(_UpperCAmelCase , 'rb' ) as f_in:
with gzip.open(str(_UpperCAmelCase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(_UpperCAmelCase , _UpperCAmelCase )
os.unlink(_UpperCAmelCase )
# Settings
lowerCamelCase_ : Optional[int] = HfArgumentParser(PreprocessingArguments)
lowerCamelCase_ : Optional[Any] = parser.parse_args()
if args.num_workers is None:
lowerCamelCase_ : int = multiprocessing.cpu_count()
lowerCamelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
lowerCamelCase_ : Tuple = time.time()
lowerCamelCase_ : Tuple = load_dataset(args.dataset_name, split='train')
print(F"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
lowerCamelCase_ : List[str] = time.time()
lowerCamelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(F"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
lowerCamelCase_ : int = set(ds.unique('hash'))
lowerCamelCase_ : Union[str, Any] = len(uniques) / len(ds)
print(F"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
lowerCamelCase_ : Optional[int] = time.time()
lowerCamelCase_ : Tuple = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(F"Time to filter dataset: {time.time()-t_start:.2f}")
print(F"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
lowerCamelCase_ : Union[str, Any] = time.time()
lowerCamelCase_ , lowerCamelCase_ : str = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}")
print(F"Size of deduplicate dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
lowerCamelCase_ : Tuple = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
lowerCamelCase_ : Optional[Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
lowerCamelCase_ : List[str] = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
lowerCamelCase_ : Optional[int] = str(data_dir / F"file-{file_number+1:012}.json")
lowerCamelCase_ : List[str] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"Time to save dataset: {time.time()-t_start:.2f}") | 286 | 1 |
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Union

import flax
import jax.numpy as jnp

from ..utils import BaseOutput
lowerCamelCase_ : Union[str, Any] = 'scheduler_config.json'
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """Enumeration-like collection of integer scheduler identifiers (1-5).

    NOTE(review): all five members below are assigned to the single name
    ``lowercase_``, so each assignment overwrites the previous one and only
    the final value (5) survives on the class. This reads like
    machine-renamed code in which each constant originally had a distinct
    member name (and the base class was likely ``Enum``) — confirm against
    the upstream source before relying on it.
    """

    # Each line overwrites the previous binding of ``lowercase_``.
    lowercase_ : int = 1
    lowercase_ : int = 2
    lowercase_ : int = 3
    lowercase_ : int = 4
    lowercase_ : int = 5
@dataclass
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """Output container produced by Flax scheduler computations.

    NOTE(review): holds a single ``jnp.ndarray`` field; upstream this kind of
    scheduler-output dataclass typically carries the step's previous/denoised
    sample — confirm the field's meaning against the caller.
    """

    # The array payload of the scheduler output (presumably the previous
    # sample of a diffusion step) — TODO confirm against upstream naming.
    lowercase_ : jnp.ndarray
class _UpperCAmelCase :
    """Base mixin for Flax schedulers: config load/save plus compatible-class discovery.

    NOTE(review): identifier mangling lost the local bindings produced by
    `cls.load_config` / `cls.from_config` (read later as `scheduler`, `state`,
    `unused_kwargs`) — restore before running.
    """

    # Config filename, ignored-config keys, declared compatibles, and a
    # has-compatibles flag (names mangled; values copied verbatim).
    lowercase_ : int = SCHEDULER_CONFIG_NAME
    lowercase_ : str = ["""dtype"""]
    lowercase_ : List[Any] = []
    lowercase_ : Any = True

    @classmethod
    def lowerCamelCase_ ( cls , snake_case_ = None , snake_case_ = None , snake_case_=False , **snake_case_ , ):
        """Instantiate a scheduler (and its state, if stateful) from a saved config."""
        A_ , A_ : str = cls.load_config(
            pretrained_model_name_or_path=snake_case_ , subfolder=snake_case_ , return_unused_kwargs=snake_case_ , **snake_case_ , )
        A_ , A_ : int = cls.from_config(snake_case_ , return_unused_kwargs=snake_case_ , **snake_case_ )
        # Stateful schedulers expose `create_state`; build the initial state eagerly.
        if hasattr(snake_case_ , 'create_state' ) and getattr(snake_case_ , 'has_state' , snake_case_ ):
            A_ : Optional[int] = scheduler.create_state()
        if return_unused_kwargs:
            return scheduler, state, unused_kwargs
        return scheduler, state

    def lowerCamelCase_ ( self , snake_case_ , snake_case_ = False , **snake_case_ ):
        """Serialize this scheduler's configuration to the given directory."""
        self.save_config(save_directory=snake_case_ , push_to_hub=snake_case_ , **snake_case_ )

    @property
    def lowerCamelCase_ ( self ):
        """Return the scheduler classes compatible with this one."""
        return self._get_compatibles()

    @classmethod
    def lowerCamelCase_ ( cls ):
        """Resolve `_compatibles` class names to classes exported by the top-level package."""
        A_ : int = list(set([cls.__name__] + cls._compatibles ) )
        A_ : Dict = importlib.import_module(__name__.split('.' )[0] )
        # Silently skip names the package does not export.
        A_ : Tuple = [
            getattr(snake_case_ , snake_case_ ) for c in compatible_classes_str if hasattr(snake_case_ , snake_case_ )
        ]
        return compatible_classes
def UpperCAmelCase__ ( x , shape ):
    """Right-pad `x` with singleton axes and broadcast it to `shape`.

    `shape` must have at least as many dimensions as `x`; new axes are appended
    on the right so the existing axes stay aligned on the left.

    Fix: the obfuscated original declared both parameters with the same name
    (a SyntaxError) while the body read `x` — parameter names restored.
    """
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase=0.999 , _UpperCAmelCase=jnp.floataa ):
"""simple docstring"""
def alpha_bar(_UpperCAmelCase ):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2
A_ : List[Any] = []
for i in range(_UpperCAmelCase ):
A_ : Optional[Any] = i / num_diffusion_timesteps
A_ : Optional[int] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_UpperCAmelCase ) / alpha_bar(_UpperCAmelCase ) , _UpperCAmelCase ) )
return jnp.array(_UpperCAmelCase , dtype=_UpperCAmelCase )
@flax.struct.dataclass
class _UpperCAmelCase :
    """Beta/alpha tensors shared by the Flax schedulers.

    NOTE(review): identifier mangling lost the field names and the local
    bindings inside `from_scheduler` (`config`, `betas`, `alphas`,
    `alphas_cumprod`) — the `return cls(...)` call below still reads them.
    """

    # Presumably betas, alphas (= 1 - betas) and their cumulative product,
    # each a 1-D array of length num_train_timesteps — TODO confirm upstream.
    lowercase_ : jnp.ndarray
    lowercase_ : jnp.ndarray
    lowercase_ : jnp.ndarray

    @classmethod
    def lowerCamelCase_ ( cls , snake_case_ ):
        """Build the common state from a scheduler's config (beta-schedule choice)."""
        A_ : Tuple = scheduler.config
        if config.trained_betas is not None:
            # Pre-trained betas override any analytic schedule.
            A_ : str = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
        elif config.beta_schedule == "linear":
            A_ : str = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            A_ : int = (
                jnp.linspace(
                    config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            A_ : Optional[Any] = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
        else:
            raise NotImplementedError(
                F"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
        A_ : List[Any] = 1.0 - betas
        A_ : Optional[int] = jnp.cumprod(snake_case_ , axis=0 )
        return cls(
            alphas=snake_case_ , betas=snake_case_ , alphas_cumprod=snake_case_ , )
def UpperCAmelCase__ ( state , original_samples , noise , timesteps ):
    """Gather sqrt(alpha_bar_t) and sqrt(1 - alpha_bar_t) for `timesteps`.

    Both factors are flattened and broadcast (from the left) to the shape of
    `original_samples` so they can scale samples/noise elementwise.

    Fix: the obfuscated original declared four identically-named parameters
    (a SyntaxError) and dropped the intermediate bindings — restored here.
    `noise` is unused but kept for call-site compatibility.
    """
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod , original_samples.shape )
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod , original_samples.shape )
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def UpperCAmelCase__ ( state , original_samples , noise , timesteps ):
    """Forward-diffuse `original_samples` with `noise` at the given `timesteps`.

    Computes sqrt(a_t) * x_0 + sqrt(1 - a_t) * eps.

    Fix: duplicate parameter names and lost local bindings restored from the
    expressions the original body still read.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , original_samples , noise , timesteps )
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def UpperCAmelCase__ ( state , sample , noise , timesteps ):
    """Compute the v-prediction target: sqrt(a_t) * eps - sqrt(1 - a_t) * x_0.

    Fix: duplicate parameter names and lost local bindings restored from the
    expressions the original body still read.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state , sample , noise , timesteps )
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[Any] = logging.get_logger(__name__)
def UpperCAmelCase__ ( config , base_model=False ):
    """Build the (timm key, HF key) rename table for a hybrid ViT checkpoint.

    Covers the stem/backbone (BiT), the transformer encoder layers, and either
    the pooler (`base_model=True`, 'vit.' prefix stripped) or the classifier head.

    Fix: the obfuscated original declared both parameters with the same name
    (a SyntaxError) and dropped the `rename_keys` binding the body appends to.
    """
    rename_keys = []
    # fmt: off
    # stem:
    rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
    rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
    rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
    rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
    # backbone
    rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
    rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
    rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
    for stage_idx in range(len(config.backbone_config.depths ) ):
        for layer_idx in range(config.backbone_config.depths[stage_idx] ):
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
            rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
        rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
        rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
        rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
    # transformer encoder
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
        rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
        rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
        rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
        rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
        rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
        rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
        rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
        rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
        rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ('norm.weight', 'layernorm.weight'),
                ('norm.bias', 'layernorm.bias'),
                ('pre_logits.fc.weight', 'pooler.dense.weight'),
                ('pre_logits.fc.bias', 'pooler.dense.bias'),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('norm.weight', 'vit.layernorm.weight'),
                ('norm.bias', 'vit.layernorm.bias'),
                ('head.weight', 'classifier.weight'),
                ('head.bias', 'classifier.bias'),
            ] )
    # fmt: on
    return rename_keys
def UpperCAmelCase__ ( state_dict , config , base_model=False ):
    """Split each fused timm qkv projection into separate q/k/v entries (in place).

    Fix: the obfuscated original declared identically-named parameters
    (a SyntaxError) and dropped every `state_dict[...] = ...` assignment, so the
    split tensors were discarded. Target key names follow the upstream ViT
    conversion script — NOTE(review): confirm against that script.
    """
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase__ ( state_dict ):
    """Drop the timm classification-head weights from `state_dict` (in place).

    Fix: the obfuscated original lost the `ignore_keys` binding and called
    `state_dict.pop` with the wrong arguments; now pops each key with a
    `None` default so a missing key is a no-op.
    """
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def UpperCAmelCase__ ( dct , old , new ):
    """Move the value stored under key `old` to key `new` in `dct` (in place).

    Fix: duplicate parameter names (SyntaxError) and the lost re-insertion
    `dct[new] = val` restored.
    """
    val = dct.pop(old )
    dct[new] = val
def UpperCAmelCase__ ( ):
    """Download the standard COCO test image (two cats) used to sanity-check conversions.

    Fix: the obfuscated original lost the `url`/`im` bindings and passed the
    wrong arguments to `requests.get`; `stream=True` lets PIL read the raw
    response body.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
    """Convert a timm hybrid-ViT checkpoint to the HF format, verify it, and save/push.

    NOTE(review): obfuscation mangled this function badly — the three parameters
    share one name (a SyntaxError), and the locals read later (`config`,
    `base_model`, `timm_model`, `state_dict`, `rename_keys`, `idalabel`, `model`,
    `transform`, `timm_transforms`, `pillow_resamplings`, `processor`, `image`,
    `timm_pixel_values`, `pixel_values`, `outputs`, `logits`, ...) were all
    assigned to `A_`. It also calls `create_rename_keys`/`rename_key`/
    `read_in_q_k_v`/`remove_classification_head_`/`prepare_img`, whose
    definitions above were renamed away. Restore names before running.
    """
    # Backbone (BiT) + hybrid ViT config: 384px, ImageNet-1k labels.
    A_ : List[Any] = BitConfig(
        global_padding='same' , layer_type='bottleneck' , depths=(3, 4, 9) , out_features=['stage3'] , embedding_dynamic_padding=_UpperCAmelCase , )
    A_ : Optional[int] = ViTHybridConfig(backbone_config=_UpperCAmelCase , image_size=384 , num_labels=1000 )
    A_ : Union[str, Any] = False
    # load original model from timm
    A_ : List[Any] = timm.create_model(_UpperCAmelCase , pretrained=_UpperCAmelCase )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    A_ : Tuple = timm_model.state_dict()
    if base_model:
        remove_classification_head_(_UpperCAmelCase )
    A_ : Any = create_rename_keys(_UpperCAmelCase , _UpperCAmelCase )
    for src, dest in rename_keys:
        rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
    read_in_q_k_v(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
    # Label mapping fetched from the HF hub dataset repo.
    A_ : Union[str, Any] = 'huggingface/label-files'
    A_ : Dict = 'imagenet-1k-id2label.json'
    A_ : List[str] = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) , 'r' ) )
    A_ : str = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
    A_ : Any = idalabel
    A_ : Optional[int] = {v: k for k, v in idalabel.items()}
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        A_ : List[Any] = ViTHybridModel(_UpperCAmelCase ).eval()
    else:
        A_ : str = ViTHybridForImageClassification(_UpperCAmelCase ).eval()
    model.load_state_dict(_UpperCAmelCase )
    # create image processor mirroring timm's eval transform pipeline
    A_ : Dict = create_transform(**resolve_data_config({} , model=_UpperCAmelCase ) )
    A_ : List[str] = transform.transforms
    A_ : List[str] = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    A_ : Tuple = ViTHybridImageProcessor(
        do_resize=_UpperCAmelCase , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_UpperCAmelCase , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=_UpperCAmelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
    A_ : Optional[Any] = prepare_img()
    A_ : Any = transform(_UpperCAmelCase ).unsqueeze(0 )
    A_ : Dict = processor(_UpperCAmelCase , return_tensors='pt' ).pixel_values
    # verify pixel values
    assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase )
    # verify logits
    with torch.no_grad():
        A_ : List[Any] = model(_UpperCAmelCase )
    A_ : List[str] = outputs.logits
    print('Predicted class:' , logits.argmax(-1 ).item() )
    if base_model:
        A_ : Union[str, Any] = timm_model.forward_features(_UpperCAmelCase )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(_UpperCAmelCase , outputs.pooler_output , atol=1E-3 )
    else:
        A_ : Tuple = timm_model(_UpperCAmelCase )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(_UpperCAmelCase , outputs.logits , atol=1E-3 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
        print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(_UpperCAmelCase )
        print(f"""Saving processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(_UpperCAmelCase )
    if push_to_hub:
        print(f"""Pushing model and processor to the hub {vit_name}""" )
        model.push_to_hub(f"""ybelkada/{vit_name}""" )
        processor.push_to_hub(f"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
    # CLI entry point: parse the timm model name, output folder and hub flag,
    # then run the conversion defined above.
    lowerCamelCase_ : Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--vit_name',
        default='vit_base_r50_s16_384',
        type=str,
        help='Name of the hybrid ViT timm model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
    )
    # NOTE(review): mangling lost the `parser` binding above and renamed the
    # conversion function, so `parser` / `convert_vit_checkpoint` are undefined here.
    lowerCamelCase_ : List[str] = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ : Tuple = logging.get_logger(__name__)

# Names of the on-disk tokenizer artifacts.
lowerCamelCase_ : List[Any] = {
    'vocab_file': 'vocab.json',
    'tokenizer_config_file': 'tokenizer_config.json',
    'merges_file': 'merges.txt',
}

# Hub URLs for the pretrained Speech2Text2 wav2vec2 en-de checkpoint.
lowerCamelCase_ : List[Any] = {
    'vocab_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
        ),
    },
    'tokenizer_config_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
        ),
    },
    'merges_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
        ),
    },
}

# End-of-word marker appended before BPE, and the continuation marker ("@@ ")
# emitted between BPE sub-tokens.
lowerCamelCase_ : Tuple = '</w>'
lowerCamelCase_ : str = '@@ '
def UpperCAmelCase__ ( word ):
    """Return the set of adjacent symbol pairs in `word` (a sequence of symbols).

    Fix: the obfuscated original lost the `pairs` and `prev_char` bindings the
    loop body reads/updates — restored here.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
# (a large sentinel is used as the positional-embeddings size instead).
lowerCamelCase_ : List[str] = {'facebook/s2t-wav2vec2-large-en-de': 10_24}
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """BPE tokenizer for Speech2Text2 (decoder-side text tokenizer).

    NOTE(review): obfuscation destroyed many local/attribute bindings in this
    class (assignments to `A_` whose real names — `self.encoder`, `self.decoder`,
    `self.bpe_ranks`, `self.cache`, `word`, `merges`, `vocab_file`,
    `merges_file`, `index`, `bpe_tokens`, `result`, `string`, `text` — are still
    read by later lines) and broke the sort lambda in `save_vocabulary`
    (`lambda snake_case_ : kv[1]` reads an undefined `kv`). Restore from the
    upstream `Speech2Text2Tokenizer` before running.
    """

    # Standard tokenizer class attributes: file names, pretrained maps,
    # positional-embeddings sizes and model input names.
    lowercase_ : Optional[Any] = VOCAB_FILES_NAMES
    lowercase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
    lowercase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase_ : Union[str, Any] = ["""input_ids""", """attention_mask"""]

    def __init__( self , snake_case_ , snake_case_="<s>" , snake_case_="<pad>" , snake_case_="</s>" , snake_case_="<unk>" , snake_case_=False , snake_case_=None , **snake_case_ , ):
        """Load vocab (and optionally merges); without merges only decoding works."""
        super().__init__(
            unk_token=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , pad_token=snake_case_ , do_lower_case=snake_case_ , **snake_case_ , )
        A_ : Dict = do_lower_case
        with open(snake_case_ , encoding='utf-8' ) as vocab_handle:
            A_ : Optional[Any] = json.load(snake_case_ )
        A_ : Tuple = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            # Decode-only mode: no BPE ranks available.
            logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
            A_ : Any = None
            A_ : Any = None
        else:
            with open(snake_case_ , encoding='utf-8' ) as merges_handle:
                A_ : Dict = merges_handle.read().split('\n' )[:-1]
            A_ : Dict = [tuple(merge.split()[:2] ) for merge in merges]
            A_ : Optional[int] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
            A_ : List[str] = {}

    @property
    def lowerCamelCase_ ( self ):
        """Vocabulary size (number of decodable ids)."""
        return len(self.decoder )

    def lowerCamelCase_ ( self ):
        """Return the full vocabulary including added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )

    def lowerCamelCase_ ( self , snake_case_ ):
        """Apply BPE to a single token, merging the lowest-ranked pair repeatedly."""
        # Append the end-of-word marker to the last character before merging.
        A_ : Union[str, Any] = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        A_ : Union[str, Any] = get_pairs(snake_case_ )
        if not pairs:
            return token
        while True:
            # Merge the pair with the lowest merge rank; stop when none remain.
            A_ : Any = min(snake_case_ , key=lambda snake_case_ : self.bpe_ranks.get(snake_case_ , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            A_ , A_ : Tuple = bigram
            A_ : Any = []
            A_ : List[str] = 0
            while i < len(snake_case_ ):
                try:
                    A_ : List[str] = word.index(snake_case_ , snake_case_ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    A_ : Any = j
                if word[i] == first and i < len(snake_case_ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            A_ : List[str] = tuple(snake_case_ )
            A_ : Dict = new_word
            if len(snake_case_ ) == 1:
                break
            else:
                A_ : Dict = get_pairs(snake_case_ )
        A_ : int = ' '.join(snake_case_ )
        # Special-case a bare newline token carrying the end-of-word marker.
        if word == "\n " + BPE_TOKEN_MERGES:
            A_ : Tuple = '\n' + BPE_TOKEN_MERGES
        if word.endswith(snake_case_ ):
            A_ : Union[str, Any] = word.replace(snake_case_ , '' )
        A_ : Any = word.replace(' ' , snake_case_ )
        A_ : Optional[int] = word
        return word

    def lowerCamelCase_ ( self , snake_case_ ):
        """Tokenize text into BPE sub-tokens; requires merges to be loaded."""
        if self.bpe_ranks is None:
            raise ValueError(
                'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding.'
                'Make sure to provide `merges.txt` file at instantiation to enable '
                'encoding.' )
        if self.do_lower_case:
            A_ : Optional[int] = text.lower()
        A_ : int = text.split()
        A_ : List[Any] = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(snake_case_ ).split(' ' ) ) )
        return split_tokens

    def lowerCamelCase_ ( self , snake_case_ ):
        """Map a token string to its vocabulary id (unk id if missing)."""
        return self.encoder.get(snake_case_ , self.encoder.get(self.unk_token ) )

    def lowerCamelCase_ ( self , snake_case_ ):
        """Map a vocabulary id back to its token string (unk token if missing)."""
        A_ : Optional[Any] = self.decoder.get(snake_case_ , self.unk_token )
        return result

    def lowerCamelCase_ ( self , snake_case_ ):
        """Join tokens into a string, concatenating '@@ '-continued sub-tokens."""
        A_ : List[Any] = ' '.join(snake_case_ )
        # make sure @@ tokens are concatenated
        A_ : List[str] = ''.join(string.split(snake_case_ ) )
        return string

    def lowerCamelCase_ ( self , snake_case_ , snake_case_ = None ):
        """Write vocab.json (and merges.txt when available) into `save_directory`."""
        if not os.path.isdir(snake_case_ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        A_ : List[Any] = os.path.join(
            snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        A_ : Tuple = os.path.join(
            snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(snake_case_ , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case_ , ensure_ascii=snake_case_ ) + '\n' )
        A_ : Union[str, Any] = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(snake_case_ , 'w' , encoding='utf-8' ) as writer:
            # Merges must be written in rank order; warn if ranks are not contiguous.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case_ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!' )
                    A_ : Any = token_index
                writer.write(' '.join(snake_case_ ) + '\n' )
                index += 1
        return (vocab_file, merges_file)
"""simple docstring"""
def UpperCAmelCase__ ( num ):
    """Return the binary representation of an integer as a '0b'-prefixed string.

    Negative inputs yield a '-0b' prefix; floats and strings are rejected.

    Fixes: the obfuscated original called `isinstance(num, num)` (TypeError at
    runtime for any input) and joined `str(num)` instead of `str(e)` for each
    computed bit, so the digits were never emitted.
    """
    if isinstance(num , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        # Collect bits most-significant first.
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
"""simple docstring"""
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ , snake_case_ = True , snake_case_ = False ):
"""simple docstring"""
A_ : List[str] = scheduler
A_ : str = optimizers if isinstance(snake_case_ , (list, tuple) ) else [optimizers]
A_ : Optional[Any] = split_batches
A_ : Union[str, Any] = step_with_optimizer
A_ : Dict = GradientState()
def lowerCamelCase_ ( self , *snake_case_ , **snake_case_ ):
"""simple docstring"""
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*snake_case_ , **snake_case_ )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*snake_case_ , **snake_case_ )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
A_ : Union[str, Any] = AcceleratorState().num_processes
for _ in range(snake_case_ ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , 'total_steps' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*snake_case_ , **snake_case_ )
else:
self.scheduler.step(*snake_case_ , **snake_case_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.scheduler.get_last_lr()
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.scheduler.state_dict()
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
self.scheduler.load_state_dict(snake_case_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.scheduler.get_lr()
def lowerCamelCase_ ( self , *snake_case_ , **snake_case_ ):
"""simple docstring"""
return self.scheduler.print_lr(*snake_case_ , **snake_case_ ) | 286 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import scaffolding for the MLuke tokenizer: the import structure is
# populated only when sentencepiece is installed, and the real module is
# loaded on first attribute access via _LazyModule.
lowerCamelCase_ : int = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ : Tuple = ['MLukeTokenizer']

if TYPE_CHECKING:
    # Static type checkers see the direct import.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    # NOTE(review): mangling renamed `_import_structure` above, so the name
    # referenced here is undefined — restore before running.
    lowerCamelCase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from collections import deque
def UpperCAmelCase__ ( g ):
    """Return the strongly connected components of directed graph `g`.

    `g` is an adjacency list (g[v] = list of successors of v). Components are
    emitted in Tarjan order, i.e. reverse topological order of the condensation.

    Fix: the obfuscated original dropped the bindings for `n`, `stack`,
    `on_stack`, `index_of`, `components` and the assignments inside the DFS, so
    it raised immediately — restored here.
    """
    n_vertices = len(g )
    stack = deque()
    on_stack = [False for _ in range(n_vertices )]
    index_of = [-1 for _ in range(n_vertices )]
    lowlink_of = index_of[:]

    def strong_connect(v , index , components ):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v )
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w , index , components )
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            # v is the root of an SCC: pop the whole component off the stack.
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w )
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w )
            components.append(component )
        return index

    components = []
    for v in range(n_vertices ):
        if index_of[v] == -1:
            strong_connect(v , 0 , components )
    return components
def UpperCAmelCase__ ( n_vertices , edges ):
    """Build adjacency lists for `n_vertices` vertices from (u, v) edge pairs.

    Fix: the obfuscated original declared two identically-named parameters
    (a SyntaxError) and dropped the `g` binding the loop appends into.
    """
    g = [[] for _ in range(n_vertices )]
    for u, v in edges:
        g[u].append(v )
    return g
if __name__ == "__main__":
    # Test
    # Sanity check on a 7-vertex graph with four SCCs.
    # NOTE(review): mangling renamed the two functions above, so `create_graph`
    # and `tarjan` (and the `n_vertices`/`source`/`target`/`edges` bindings)
    # are undefined here — restore before running.
    lowerCamelCase_ : int = 7
    lowerCamelCase_ : Dict = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    lowerCamelCase_ : Any = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    lowerCamelCase_ : Optional[Any] = [(u, v) for u, v in zip(source, target)]
    lowerCamelCase_ : Optional[int] = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
"""simple docstring"""
import os
# Precomputes a list of the 100 first triangular numbers
# (t_n = n * (n + 1) / 2 for n = 1..100), used as a membership test below.
lowerCamelCase_ : List[str] = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]
def UpperCAmelCase__ ( ):
    """Project Euler 42: count words in words.txt whose letter-value sum is triangular.

    A word's value is the sum of its letter positions (A=1, ..., Z=26, i.e.
    ord(ch) - 64 for uppercase input).

    Fixes: the obfuscated original resolved `os.path.realpath` of an undefined
    name instead of `__file__`, summed `ord(_UpperCAmelCase)` instead of
    `ord(x)` per letter, and dropped the intermediate bindings.
    NOTE(review): `TRIANGULAR_NUMBERS` is the module constant defined above,
    whose binding was also mangled — restore its name as well.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    words_file_path = os.path.join(script_dir , 'words.txt' )
    with open(words_file_path ) as f:
        words = f.readline()
    # The input is a single line of comma-separated, double-quoted words.
    words = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
    words = [
        word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words )
if __name__ == "__main__":
    # Print the Project Euler 42 answer when run as a script.
    print(solution())
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Return the canonical anagram signature of a word: its letters in sorted order."""
    letters = sorted(_UpperCAmelCase )
    return "".join(letters )
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Return every dictionary word sharing this word's letter signature."""
    key = signature(_UpperCAmelCase )
    return word_by_signature[key]
# Load the word list shipped next to this script, lower-cased and deduplicated,
# and index it by letter signature for O(1) anagram lookup.
# NOTE(review): mangling lost the `data`, `word_list` and `word_by_signature`
# bindings that the lines below read — restore before running.
lowerCamelCase_ : str = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
lowerCamelCase_ : int = sorted({word.strip().lower() for word in data.splitlines()})

lowerCamelCase_ : int = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    # Dump every word that has at least one other anagram to anagrams.txt.
    lowerCamelCase_ : Union[str, Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open('anagrams.txt', 'w') as file:
        file.write('all_anagrams = \n ')
        file.write(pprint.pformat(all_anagrams))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for XLNet: each optional-dependency group
# (sentencepiece tokenizer, fast tokenizer, PyTorch models, TF models) is only
# registered when its backend is installed.
# NOTE(review): mangling lost the `_import_structure` binding — the dict/list
# assignments below all target throwaway names while the `_LazyModule` call at
# the bottom still reads `_import_structure`; restore before running.
lowerCamelCase_ : List[str] = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ : str = ['XLNetTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ : List[str] = ['XLNetTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ : int = [
        'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XLNetForMultipleChoice',
        'XLNetForQuestionAnswering',
        'XLNetForQuestionAnsweringSimple',
        'XLNetForSequenceClassification',
        'XLNetForTokenClassification',
        'XLNetLMHeadModel',
        'XLNetModel',
        'XLNetPreTrainedModel',
        'load_tf_weights_in_xlnet',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ : Union[str, Any] = [
        'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFXLNetForMultipleChoice',
        'TFXLNetForQuestionAnsweringSimple',
        'TFXLNetForSequenceClassification',
        'TFXLNetForTokenClassification',
        'TFXLNetLMHeadModel',
        'TFXLNetMainLayer',
        'TFXLNetModel',
        'TFXLNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers resolve the concrete imports directly.
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy loader.
    lowerCamelCase_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
    """Tokenizer tests for CTRL's BPE tokenizer, driven by the shared tester mixin."""

    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        """Write a tiny vocab/merges pair into the temp dir used by the mixin."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        """Build a CTRLTokenizer from the fixture files, honoring the special-token map."""
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Round-trip sample text required by the tester mixin."""
        input_text = 'adapt react readapt apt'
        output_text = 'adapt react readapt apt'
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization and token->id conversion against the toy vocab."""
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'adapt react readapt apt'
        bpe_tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """Minimal custom diffusion pipeline: one denoising step then a constant image."""

    def __init__(self, unet, scheduler):
        super().__init__()
        # Register components so save/load and device placement work.
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # Random latent sized from the registered UNet's config.
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # Exercises unet + scheduler, then cancels them out to return all-ones.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """
    Descriptor that mimics @property but caches the computed value on the
    instance (under `__cached_<name>`), so the getter runs at most once.
    NOTE: a computed value of None is not cached and will be recomputed.
    """

    def __get__(self, obj, objtype=None):
        if obj is None:
            # Accessed on the class, not an instance: return the descriptor itself.
            return self
        if self.fget is None:
            raise AttributeError('unreadable attribute')
        attr = '__cached_' + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Convert a string representation of truth to 1 (true) or 0 (false).

    Raises ValueError for anything outside the recognized literals.
    """
    val = _UpperCAmelCase.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"""invalid truth value {val!r}""")
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Return True if the argument is a torch fx proxy, a torch/TF/JAX tensor, or a numpy array."""
    if is_torch_fx_proxy(_UpperCAmelCase ):
        return True
    if is_torch_available():
        import torch

        if isinstance(_UpperCAmelCase , torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(_UpperCAmelCase , tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        # JAX tracers count as tensors so this works inside jit/grad transforms.
        if isinstance(_UpperCAmelCase , (jnp.ndarray, Tracer) ):
            return True

    # Fall back: a plain numpy array also counts as a tensor.
    return isinstance(_UpperCAmelCase , np.ndarray )
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Private check: is the argument a numpy ndarray? (safe to call without numpy guards)"""
    return isinstance(_UpperCAmelCase , np.ndarray )
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Public wrapper around the numpy check."""
    # NOTE(review): `_is_numpy` is not defined under that name in this file (the
    # private helper above was renamed by the source transformation) — confirm wiring.
    return _is_numpy(_UpperCAmelCase )
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Private check: is the argument a torch.Tensor? Assumes torch is importable."""
    import torch

    return isinstance(_UpperCAmelCase , torch.Tensor )
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Return True if torch is installed and the argument is a torch.Tensor."""
    # NOTE(review): `_is_torch` is not defined under that name in this file — confirm wiring.
    return False if not is_torch_available() else _is_torch(_UpperCAmelCase )
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Private check: is the argument a torch.device? Assumes torch is importable."""
    import torch

    return isinstance(_UpperCAmelCase , torch.device )
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Return True if torch is installed and the argument is a torch.device."""
    # NOTE(review): `_is_torch_device` is not defined under that name in this file — confirm wiring.
    return False if not is_torch_available() else _is_torch_device(_UpperCAmelCase )
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Return True if the argument is a torch.dtype, or the string name of one (e.g. "float32")."""
    import torch

    x = _UpperCAmelCase
    if isinstance(x, str):
        # Resolve a dtype name like "float32" to the torch attribute, if it exists.
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Return True if torch is installed and the argument is a torch dtype (or its name)."""
    # NOTE(review): `_is_torch_dtype` is not defined under that name in this file — confirm wiring.
    return False if not is_torch_available() else _is_torch_dtype(_UpperCAmelCase )
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Private check: is the argument a tf.Tensor? Assumes tensorflow is importable."""
    import tensorflow as tf

    return isinstance(_UpperCAmelCase , tf.Tensor )
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Return True if tensorflow is installed and the argument is a tf.Tensor."""
    # NOTE(review): `_is_tensorflow` is not defined under that name in this file — confirm wiring.
    return False if not is_tf_available() else _is_tensorflow(_UpperCAmelCase )
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Private check: is the argument a symbolic (graph-mode) TF tensor?"""
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(_UpperCAmelCase , 'is_symbolic_tensor' ):
        return tf.is_symbolic_tensor(_UpperCAmelCase )
    return type(_UpperCAmelCase ) == tf.Tensor
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Return True if tensorflow is installed and the argument is a symbolic TF tensor."""
    # NOTE(review): `_is_tf_symbolic_tensor` is not defined under that name in this file — confirm wiring.
    return False if not is_tf_available() else _is_tf_symbolic_tensor(_UpperCAmelCase )
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Private check: is the argument a jax ndarray? Assumes jax is importable."""
    import jax.numpy as jnp  # noqa: F811

    return isinstance(_UpperCAmelCase , jnp.ndarray )
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Return True if flax/jax is installed and the argument is a jax ndarray."""
    # NOTE(review): `_is_jax` is not defined under that name in this file — confirm wiring.
    return False if not is_flax_available() else _is_jax(_UpperCAmelCase )
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Recursively convert tensors, arrays and containers to plain Python objects."""

    def _to_py_obj(obj):
        if isinstance(obj, (dict, UserDict)):
            return {k: _to_py_obj(v) for k, v in obj.items()}
        elif isinstance(obj, (list, tuple)):
            return [_to_py_obj(o) for o in obj]
        # NOTE(review): is_tf_tensor / is_torch_tensor / is_jax_tensor are this
        # module's framework predicates — confirm they resolve under these names.
        elif is_tf_tensor(obj):
            return obj.numpy().tolist()
        elif is_torch_tensor(obj):
            return obj.detach().cpu().tolist()
        elif is_jax_tensor(obj):
            return np.asarray(obj).tolist()
        elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
            return obj.tolist()
        else:
            return obj

    return _to_py_obj(_UpperCAmelCase)
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Recursively convert tensors, lists and containers to numpy arrays."""

    def _to_numpy(obj):
        if isinstance(obj, (dict, UserDict)):
            return {k: _to_numpy(v) for k, v in obj.items()}
        elif isinstance(obj, (list, tuple)):
            return np.array(obj)
        # NOTE(review): is_tf_tensor / is_torch_tensor / is_jax_tensor are this
        # module's framework predicates — confirm they resolve under these names.
        elif is_tf_tensor(obj):
            return obj.numpy()
        elif is_torch_tensor(obj):
            return obj.detach().cpu().numpy()
        elif is_jax_tensor(obj):
            return np.asarray(obj)
        else:
            return obj

    return _to_numpy(_UpperCAmelCase)
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """
    Base class for model outputs: an ordered dict that skips `None`-valued
    dataclass fields, supports string-key and integer (tuple-style) indexing,
    and keeps attribute and dict views in sync.
    """

    def __post_init__(self):
        """Populate the dict view from the dataclass fields after __init__."""
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"""{self.__class__.__name__} has no fields.""")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        # NOTE(review): `is_tensor` refers to this module's tensor predicate — confirm wiring.
        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"""Cannot set key/value for {element}. It needs to be a tuple (key, value)."""
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            # Normal case: copy every non-None field into the dict view.
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""")

    def pop(self, *args, **kwargs):
        raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""")

    def update(self, *args, **kwargs):
        raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""")

    def __getitem__(self, k):
        if isinstance(k, str):
            # String key: dict-style lookup.
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            # Integer/slice: tuple-style lookup over the non-None values.
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Return a tuple of all non-None field values, in field order."""
        return tuple(self[k] for k in self.keys())
class _UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
@classmethod
def lowerCamelCase_ ( cls , snake_case_ ):
"""simple docstring"""
raise ValueError(
F"""{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}""" )
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """String-valued enum of padding strategies: longest / max_length / do_not_pad."""

    # NOTE(review): the repeated `lowercase_` attribute name means only the last
    # assignment survives — member names look garbled by the source transformation.
    lowercase_ : List[str] = """longest"""
    lowercase_ : Dict = """max_length"""
    lowercase_ : Optional[Any] = """do_not_pad"""
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """String-valued enum of supported tensor frameworks: pt / tf / np / jax."""

    # NOTE(review): same repeated-name caveat as above.
    lowercase_ : Dict = """pt"""
    lowercase_ : Any = """tf"""
    lowercase_ : Tuple = """np"""
    lowercase_ : Any = """jax"""
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , snake_case_ ):
"""simple docstring"""
A_ : Dict = context_managers
A_ : List[str] = ExitStack()
def __enter__( self ):
"""simple docstring"""
for context_manager in self.context_managers:
self.stack.enter_context(snake_case_ )
def __exit__( self , *snake_case_ , **snake_case_ ):
"""simple docstring"""
self.stack.__exit__(*snake_case_ , **snake_case_ )
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Return True if the model class's forward/call signature defaults `return_loss` to True."""
    # NOTE(review): `framework`, `model_class` and `signature` are read below but the
    # results are bound to `A_` — the names look garbled by the source transformation.
    A_ : str = infer_framework(_UpperCAmelCase )
    if framework == "tf":
        A_ : List[Any] = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        A_ : Dict = inspect.signature(model_class.forward )  # PyTorch models
    else:
        A_ : Union[str, Any] = inspect.signature(model_class.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Return the label-argument names accepted by the model class's forward/call signature."""
    # NOTE(review): same unbound-name caveat as above (`model_name`, `framework`, `signature`).
    A_ : Any = model_class.__name__
    A_ : int = infer_framework(_UpperCAmelCase )
    if framework == "tf":
        A_ : Optional[Any] = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        A_ : Dict = inspect.signature(model_class.forward )  # PyTorch models
    else:
        A_ : Dict = inspect.signature(model_class.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        # QA models also use start/end position arguments as labels.
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def UpperCAmelCase__ ( d , parent_key = "" , delimiter = "." ):
    """Flatten a nested dict into one level, joining keys with `delimiter`."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                # Recurse into non-empty mappings, yielding fully-qualified keys.
                yield from _flatten_dict(v, key, delimiter=delimiter)
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
@contextmanager
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase = False ):
"""simple docstring"""
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def UpperCAmelCase__ ( array , axes = None ):
    """Framework-agnostic transpose for numpy / torch / TF / jax arrays."""
    # NOTE(review): the framework predicates are this module's helpers — confirm wiring.
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        # torch uses .T for a full transpose, permute(*axes) for an explicit order.
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"""Type not supported for transpose: {type(array)}.""")
def UpperCAmelCase__ ( array , newshape ):
    """Framework-agnostic reshape for numpy / torch / TF / jax arrays."""
    # NOTE(review): the framework predicates are this module's helpers — confirm wiring.
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        # torch takes the new shape as varargs.
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"""Type not supported for reshape: {type(array)}.""")
def UpperCAmelCase__ ( array , axis = None ):
    """Framework-agnostic squeeze for numpy / torch / TF / jax arrays."""
    # NOTE(review): the framework predicates are this module's helpers — confirm wiring.
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        # torch's keyword is `dim`, and omitting it squeezes all size-1 dims.
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"""Type not supported for squeeze: {type(array)}.""")
def UpperCAmelCase__ ( array , axis ):
    """Framework-agnostic expand_dims for numpy / torch / TF / jax arrays."""
    # NOTE(review): the framework predicates are this module's helpers — confirm wiring.
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"""Type not supported for expand_dims: {type(array)}.""")
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Framework-agnostic element count for numpy / torch / TF / jax arrays."""
    # NOTE(review): the framework predicates are this module's helpers — confirm wiring.
    if is_numpy_array(_UpperCAmelCase):
        return np.size(_UpperCAmelCase)
    elif is_torch_tensor(_UpperCAmelCase):
        return _UpperCAmelCase.numel()
    elif is_tf_tensor(_UpperCAmelCase):
        import tensorflow as tf

        return tf.size(_UpperCAmelCase)
    elif is_jax_tensor(_UpperCAmelCase):
        return _UpperCAmelCase.size
    else:
        # Fixed: the original message said "expand_dims" (copy-paste error).
        raise ValueError(f"""Type not supported for tensor_size: {type(_UpperCAmelCase)}.""")
def UpperCAmelCase__ ( auto_map , repo_id ):
    """Prefix every unqualified entry of an auto_map with "<repo_id>--" (in place) and return it."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            # Lists keep None entries and already-qualified ("--") entries as-is.
            auto_map[key] = [f"""{repo_id}--{v}""" if (v is not None and '--' not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"""{repo_id}--{value}"""
    return auto_map
def UpperCAmelCase__ ( _UpperCAmelCase ):
    """Infer "tf", "pt" or "flax" from a model class's MRO; raise TypeError otherwise."""
    for base_class in inspect.getmro(_UpperCAmelCase):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith('tensorflow') or module.startswith('keras') or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith('torch') or name == "PreTrainedModel":
            return "pt"
        elif module.startswith('flax') or module.startswith('jax') or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        # Fixed: the error message referenced an undefined name; report the argument itself.
        raise TypeError(f"""Could not infer framework from class {_UpperCAmelCase}.""")
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def UpperCAmelCase__ ( grid , source , destination , allow_diagonal , ):
    """Dijkstra shortest path on a 0/1 grid (unit edge cost between open cells).

    Returns (distance, path) where path lists (row, col) cells from source to
    destination inclusive, or (np.inf, []) when the destination is unreachable.
    """
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    # predecessors holds, for each reached cell, the cell we came from.
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            # Walk predecessors back to the source to rebuild the path.
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                # Only open cells (value 1) are traversable; relax if cheaper.
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod() | 286 | 1 |
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    'vocab_size': len(tokenizer),
    'scale_attn_by_inverse_layer_idx': True,
    'reorder_and_upcast_attn': True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase_ : Optional[Any] = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """Configuration class for the Informer time-series transformer."""

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length = None,
        context_length = None,
        distribution_output = "student_t",
        loss = "nll",
        input_size = 1,
        lags_sequence = None,
        scaling = "mean",
        num_dynamic_real_features = 0,
        num_static_categorical_features = 0,
        num_static_real_features = 0,
        num_time_features = 0,
        cardinality = None,
        embedding_dimension = None,
        d_model = 64,
        encoder_ffn_dim = 32,
        decoder_ffn_dim = 32,
        encoder_attention_heads = 2,
        decoder_attention_heads = 2,
        encoder_layers = 2,
        decoder_layers = 2,
        is_encoder_decoder = True,
        activation_function = "gelu",
        dropout = 0.05,
        encoder_layerdrop = 0.1,
        decoder_layerdrop = 0.1,
        attention_dropout = 0.1,
        activation_dropout = 0.1,
        num_parallel_samples = 100,
        init_std = 0.02,
        use_cache=True,
        attention_type = "prob",
        sampling_factor = 5,
        distil = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Total per-timestep feature width fed to the transformer."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
lowerCamelCase_ : str = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """Configuration class for RoCBert (BERT variant with shape/pronunciation embeddings)."""

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size = 30522,
        hidden_size = 768,
        num_hidden_layers = 12,
        num_attention_heads = 12,
        intermediate_size = 3072,
        hidden_act = "gelu",
        hidden_dropout_prob = 0.1,
        attention_probs_dropout_prob = 0.1,
        max_position_embeddings = 512,
        type_vocab_size = 2,
        initializer_range = 0.02,
        layer_norm_eps = 1e-12,
        use_cache = True,
        pad_token_id = 0,
        position_embedding_type = "absolute",
        classifier_dropout = None,
        enable_pronunciation = True,
        enable_shape = True,
        pronunciation_embed_dim = 768,
        pronunciation_vocab_size = 910,
        shape_embed_dim = 512,
        shape_vocab_size = 24858,
        concat_input = True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific: optional pronunciation/shape channels and how they combine.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
"""simple docstring"""
import os
def UpperCAmelCase__ ( ):
    """Return the first ten digits of the sum of the numbers in num.txt (Project Euler 13)."""
    # num.txt is expected to sit next to this script.
    file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'num.txt')
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(UpperCAmelCase__())
"""simple docstring"""
from __future__ import annotations
from typing import Any
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , snake_case_ ):
"""simple docstring"""
A_ : int = num_of_nodes
A_ : list[list[int]] = []
A_ : dict[int, int] = {}
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
self.m_edges.append([u_node, v_node, weight] )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
if self.m_component[u_node] != u_node:
for k in self.m_component:
A_ : int = self.find_component(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
if component_size[u_node] <= component_size[v_node]:
A_ : int = v_node
component_size[v_node] += component_size[u_node]
self.set_component(snake_case_ )
elif component_size[u_node] >= component_size[v_node]:
A_ : str = self.find_component(snake_case_ )
component_size[u_node] += component_size[v_node]
self.set_component(snake_case_ )
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Dict = []
A_ : int = 0
A_ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
A_ : Union[str, Any] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
A_ , A_ , A_ : List[Any] = edge
A_ : List[Any] = self.m_component[u]
A_ : Optional[Any] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
A_ : List[str] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(snake_case_ , snake_case_ ):
A_ , A_ , A_ : Optional[int] = edge
A_ : Dict = self.m_component[u]
A_ : List[str] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(snake_case_ , snake_case_ , snake_case_ )
print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
A_ : Optional[Any] = [-1] * self.m_num_of_nodes
print(F"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def UpperCAmelCase__ ( ):
    """No-op placeholder. NOTE(review): the body appears to have been stripped by the
    source transformation; it presumably exercised the graph class above — confirm."""
if __name__ == "__main__":
    import doctest

    # Runs the module's doctests (none are currently defined).
    doctest.testmod()
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
lowerCamelCase_ : Dict = get_logger(__name__)
lowerCamelCase_ : List[str] = r'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class _UpperCAmelCase :
    """Abstract base class for all logits processors applied during Flax generation."""

    # `lowerCamelCase_` is this module's shared inputs docstring constant.
    @add_start_docstrings(lowerCamelCase_)
    def __call__(self, input_ids, scores):
        """Flax method for processing logits; subclasses must override."""
        raise NotImplementedError(
            f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""")


class _UpperCAmelCase :
    """Abstract base class for all logits warpers applied during Flax sampling."""

    @add_start_docstrings(lowerCamelCase_)
    def __call__(self, input_ids, scores):
        """Flax method for warping logits; subclasses must override."""
        raise NotImplementedError(
            f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""")
class _UpperCAmelCase ( list ):
    """A list of logits processors applied in order; each receives (input_ids, scores, cur_len)."""

    @add_start_docstrings(lowerCamelCase_)
    def __call__(self, input_ids, scores, cur_len, **kwargs):
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                # Processors with extra parameters must receive them all via kwargs.
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"""Make sure that all the required parameters: {list(function_args.keys() )} for """
                        f"""{processor.__class__} are passed to the logits processor.""")
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """Logits warper that divides scores by a strictly positive `temperature`."""

    def __init__(self, temperature):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"""`temperature` has to be a strictly positive float, but is {temperature}""")
        self.temperature = temperature

    def __call__(self, input_ids, scores, cur_len):
        scores = scores / self.temperature
        return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """Top-p (nucleus) logits warper: keeps the smallest token set whose cumulative
    probability exceeds `top_p`, masking the rest with `filter_value`."""

    def __init__(self, top_p, filter_value = -float('Inf'), min_tokens_to_keep = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"""`top_p` has to be a float > 0 and < 1, but is {top_p}""")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids, scores, cur_len):
        # Sort all scores descending (full-width top_k), then build a keep-mask
        # over the sorted order from the cumulative softmax.
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        # Scatter the filtered scores back to their original vocabulary positions.
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ = -float('Inf' ) , snake_case_ = 1 ):
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or top_k <= 0:
raise ValueError(F"""`top_k` has to be a strictly positive integer, but is {top_k}""" )
A_ : str = max(snake_case_ , snake_case_ )
A_ : Union[str, Any] = filter_value
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ , A_ : int = scores.shape
A_ : Tuple = jnp.full(batch_size * vocab_size , self.filter_value )
A_ : Union[str, Any] = min(self.top_k , scores.shape[-1] ) # Safety check
A_ , A_ : Dict = lax.top_k(snake_case_ , snake_case_ )
A_ : Optional[int] = jnp.broadcast_to((jnp.arange(snake_case_ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
A_ : int = topk_scores.flatten()
A_ : Any = topk_indices.flatten() + shift
A_ : List[str] = next_scores_flat.at[topk_indices_flat].set(snake_case_ )
A_ : Union[str, Any] = next_scores_flat.reshape(snake_case_ , snake_case_ )
return next_scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ ):
"""simple docstring"""
A_ : Union[str, Any] = bos_token_id
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Optional[Any] = jnp.full(scores.shape , -float('inf' ) )
A_ : Union[str, Any] = 1 - jnp.bool_(cur_len - 1 )
A_ : str = jnp.where(snake_case_ , new_scores.at[:, self.bos_token_id].set(0 ) , snake_case_ )
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Dict = max_length
A_ : Optional[int] = eos_token_id
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Union[str, Any] = jnp.full(scores.shape , -float('inf' ) )
A_ : Dict = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A_ : Dict = jnp.where(snake_case_ , new_scores.at[:, self.eos_token_id].set(0 ) , snake_case_ )
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ ):
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or min_length < 0:
raise ValueError(F"""`min_length` has to be a positive integer, but is {min_length}""" )
if not isinstance(snake_case_ , snake_case_ ) or eos_token_id < 0:
raise ValueError(F"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" )
A_ : Any = min_length
A_ : List[Any] = eos_token_id
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : int = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
A_ : Optional[Any] = jnp.where(snake_case_ , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , snake_case_ )
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : List[Any] = list(snake_case_ )
A_ : Tuple = begin_index
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Dict = 1 - jnp.bool_(cur_len - self.begin_index )
A_ : int = jnp.where(snake_case_ , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , snake_case_ )
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ ):
"""simple docstring"""
A_ : List[Any] = list(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Optional[Any] = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ ):
"""simple docstring"""
A_ : Any = dict(snake_case_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
A_ : Tuple = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
A_ : Tuple = force_token_array.at[index].set(snake_case_ )
A_ : Any = jnp.intaa(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
def _force_token(snake_case_ ):
A_ : List[Any] = scores.shape[0]
A_ : Any = self.force_token_array[generation_idx]
A_ : Tuple = jnp.ones_like(snake_case_ , dtype=scores.dtype ) * -float('inf' )
A_ : List[Any] = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
A_ : int = lax.dynamic_update_slice(snake_case_ , snake_case_ , (0, current_token) )
return new_scores
A_ : int = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(snake_case_ ) , lambda: scores , ) , )
return scores
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Tuple = generate_config.eos_token_id
A_ : Optional[int] = generate_config.no_timestamps_token_id
A_ : List[str] = generate_config.no_timestamps_token_id + 1
A_ : Any = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(snake_case_ , 'max_initial_timestamp_index' ):
A_ : List[Any] = generate_config.max_initial_timestamp_index
else:
A_ : Any = model_config.vocab_size
if self.max_initial_timestamp_index is None:
A_ : Optional[Any] = model_config.vocab_size
def __call__( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : List[str] = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
def handle_pairs(snake_case_ , snake_case_ ):
A_ : Any = jnp.where((cur_len - self.begin_index) >= 1 , snake_case_ , snake_case_ )
A_ : Tuple = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , snake_case_ , )
A_ : Tuple = jnp.where((cur_len - self.begin_index) < 2 , snake_case_ , snake_case_ )
A_ : Any = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , snake_case_ , snake_case_ , )
return jnp.where(
snake_case_ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , snake_case_ , )
A_ : Tuple = jax.vmap(snake_case_ )(snake_case_ , snake_case_ )
A_ : Optional[Any] = jnp.where(cur_len == self.begin_index , snake_case_ , snake_case_ )
A_ : Tuple = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , snake_case_ , )
A_ : int = self.timestamp_begin + self.max_initial_timestamp_index
A_ : List[Any] = jnp.where(
snake_case_ , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , snake_case_ , )
# if sum of probability over timestamps is above any other token, sample timestamp
A_ : Any = jax.nn.log_softmax(snake_case_ , axis=-1 )
def handle_cumulative_probs(snake_case_ , snake_case_ ):
A_ : Dict = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
A_ : Optional[Any] = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , snake_case_ , )
A_ : Union[str, Any] = jax.vmap(snake_case_ )(snake_case_ , snake_case_ )
return scores | 286 | 1 |
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowerCamelCase_ : List[str] = datasets.utils.logging.get_logger(__name__)
class _UpperCAmelCase ( folder_based_builder.FolderBasedBuilderConfig ):
'''simple docstring'''
lowercase_ : bool = None
lowercase_ : bool = None
class _UpperCAmelCase ( folder_based_builder.FolderBasedBuilder ):
'''simple docstring'''
lowercase_ : List[str] = datasets.Audio()
lowercase_ : int = """audio"""
lowercase_ : List[Any] = AudioFolderConfig
lowercase_ : List[str] # definition at the bottom of the script
lowercase_ : Tuple = AudioClassification(audio_column="""audio""" , label_column="""label""" )
lowerCamelCase_ : Dict = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
lowerCamelCase_ : Optional[Any] = AUDIO_EXTENSIONS | 286 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : Optional[Any] = R'\w+[.]\d+'
A_ : int = re.findall(_UpperCAmelCase , _UpperCAmelCase )
for pat in pats:
A_ : Optional[int] = key.replace(_UpperCAmelCase , '_'.join(pat.split('.' ) ) )
return key
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : List[Any] = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
A_ : Union[str, Any] = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
A_ : List[str] = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
A_ : Optional[Any] = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
A_ : int = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
A_ : str = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
A_ : Optional[Any] = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
A_ : Optional[Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
A_ : Tuple = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
A_ : Optional[int] = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=42 ):
"""simple docstring"""
A_ : int = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
A_ : Union[str, Any] = flax_model.init_weights(PRNGKey(_UpperCAmelCase ) )
A_ : Optional[Any] = flatten_dict(_UpperCAmelCase )
A_ : Tuple = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A_ : Any = rename_key(_UpperCAmelCase )
A_ : List[str] = tuple(renamed_pt_key.split('.' ) )
# Correctly rename weight parameters
A_ , A_ : Union[str, Any] = rename_key_and_reshape_tensor(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
f"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
A_ : str = jnp.asarray(_UpperCAmelCase )
return unflatten_dict(_UpperCAmelCase ) | 286 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
lowerCamelCase_ : List[str] = None
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
lowerCamelCase_ : List[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase_ : str = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
},
'tokenizer_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
},
}
lowerCamelCase_ : List[Any] = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
lowerCamelCase_ : List[Any] = '▁'
# Segments (not really needed)
lowerCamelCase_ : Tuple = 0
lowerCamelCase_ : Any = 1
lowerCamelCase_ : Optional[int] = 2
lowerCamelCase_ : List[str] = 3
lowerCamelCase_ : List[Any] = 4
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowercase_ : List[str] = VOCAB_FILES_NAMES
lowercase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Union[str, Any] = """left"""
lowercase_ : Any = XLNetTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=False , snake_case_=True , snake_case_=False , snake_case_="<s>" , snake_case_="</s>" , snake_case_="<unk>" , snake_case_="<sep>" , snake_case_="<pad>" , snake_case_="<cls>" , snake_case_="<mask>" , snake_case_=["<eop>", "<eod>"] , **snake_case_ , ):
"""simple docstring"""
A_ : Union[str, Any] = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
super().__init__(
vocab_file=snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , remove_space=snake_case_ , keep_accents=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , additional_special_tokens=snake_case_ , **snake_case_ , )
A_ : Dict = 3
A_ : Optional[Any] = do_lower_case
A_ : Optional[Any] = remove_space
A_ : Union[str, Any] = keep_accents
A_ : Tuple = vocab_file
A_ : Optional[int] = False if not self.vocab_file else True
def lowerCamelCase_ ( self , snake_case_ , snake_case_ = None ):
"""simple docstring"""
A_ : List[Any] = [self.sep_token_id]
A_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowerCamelCase_ ( self , snake_case_ , snake_case_ = None ):
"""simple docstring"""
A_ : Optional[Any] = [self.sep_token_id]
A_ : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowerCamelCase_ ( self , snake_case_ , snake_case_ = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
A_ : List[Any] = os.path.join(
snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ):
copyfile(self.vocab_file , snake_case_ )
return (out_vocab_file,) | 286 |
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowercase_ : List[str] = CustomTokenizer
pass | 286 | 1 |
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : int = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
number //= 100000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
lowerCamelCase_ : list[bool | None] = [None] * 10_00_00_00
lowerCamelCase_ : Union[str, Any] = True
lowerCamelCase_ : Tuple = False
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
A_ : Optional[int] = chain(next_number(_UpperCAmelCase ) )
A_ : List[str] = number_chain
while number < 10000000:
A_ : List[Any] = number_chain
number *= 10
return number_chain
def UpperCAmelCase__ ( _UpperCAmelCase = 10000000 ):
"""simple docstring"""
for i in range(1 , _UpperCAmelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }") | 286 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowerCamelCase_ : str = logging.get_logger(__name__)
@add_end_docstrings(
UpperCAmelCase__ , r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
if self.framework == "tf":
A_ : str = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
A_ : List[str] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case_ )
else:
raise ValueError('Unsupported framework' )
return masked_index
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : List[str] = self.get_masked_index(snake_case_ )
A_ : str = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
if isinstance(snake_case_ , snake_case_ ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['input_ids'][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_=None , **snake_case_ ):
"""simple docstring"""
if return_tensors is None:
A_ : Any = self.framework
A_ : Dict = self.tokenizer(snake_case_ , return_tensors=snake_case_ )
self.ensure_exactly_one_mask_token(snake_case_ )
return model_inputs
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Dict = self.model(**snake_case_ )
A_ : Optional[int] = model_inputs['input_ids']
return model_outputs
def lowerCamelCase_ ( self , snake_case_ , snake_case_=5 , snake_case_=None ):
"""simple docstring"""
if target_ids is not None and target_ids.shape[0] < top_k:
A_ : str = target_ids.shape[0]
A_ : Optional[Any] = model_outputs['input_ids'][0]
A_ : List[Any] = model_outputs['logits']
if self.framework == "tf":
A_ : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
A_ : Union[str, Any] = outputs.numpy()
A_ : Optional[int] = outputs[0, masked_index, :]
A_ : Optional[Any] = stable_softmax(snake_case_ , axis=-1 )
if target_ids is not None:
A_ : Union[str, Any] = tf.gather_nd(tf.squeeze(snake_case_ , 0 ) , target_ids.reshape(-1 , 1 ) )
A_ : Optional[int] = tf.expand_dims(snake_case_ , 0 )
A_ : Any = tf.math.top_k(snake_case_ , k=snake_case_ )
A_ , A_ : str = topk.values.numpy(), topk.indices.numpy()
else:
A_ : int = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case_ ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
A_ : Tuple = outputs[0, masked_index, :]
A_ : List[str] = logits.softmax(dim=-1 )
if target_ids is not None:
A_ : str = probs[..., target_ids]
A_ , A_ : List[str] = probs.topk(snake_case_ )
A_ : List[Any] = []
A_ : int = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
A_ : str = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
A_ : Union[str, Any] = input_ids.numpy().copy()
if target_ids is not None:
A_ : str = target_ids[p].tolist()
A_ : Union[str, Any] = p
# Filter padding out:
A_ : Any = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
A_ : Any = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
A_ : Any = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence}
row.append(snake_case_ )
result.append(snake_case_ )
if single_mask:
return result[0]
return result
def lowerCamelCase_ ( self , snake_case_ , snake_case_=None ):
"""simple docstring"""
if isinstance(snake_case_ , snake_case_ ):
A_ : List[str] = [targets]
try:
A_ : Optional[int] = self.tokenizer.get_vocab()
except Exception:
A_ : int = {}
A_ : Tuple = []
for target in targets:
A_ : int = vocab.get(snake_case_ , snake_case_ )
if id_ is None:
A_ : Tuple = self.tokenizer(
snake_case_ , add_special_tokens=snake_case_ , return_attention_mask=snake_case_ , return_token_type_ids=snake_case_ , max_length=1 , truncation=snake_case_ , )['input_ids']
if len(snake_case_ ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'We cannot replace it with anything meaningful, ignoring it' )
continue
A_ : str = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
A_ : Tuple = list(set(snake_case_ ) )
if len(snake_case_ ) == 0:
raise ValueError('At least one target must be provided when passed.' )
A_ : Optional[Any] = np.array(snake_case_ )
return target_ids
def lowerCamelCase_ ( self , snake_case_=None , snake_case_=None ):
"""simple docstring"""
A_ : List[str] = {}
if targets is not None:
A_ : Any = self.get_target_ids(snake_case_ , snake_case_ )
A_ : Optional[Any] = target_ids
if top_k is not None:
A_ : int = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' )
return {}, {}, postprocess_params
def __call__( self , snake_case_ , *snake_case_ , **snake_case_ ):
"""simple docstring"""
A_ : List[str] = super().__call__(snake_case_ , **snake_case_ )
if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) == 1:
return outputs[0]
return outputs | 286 | 1 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
# TensorFlow and the TF-specific benchmark arguments are imported lazily so this
# module can still be imported in an environment without TensorFlow installed.
if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError
    from .benchmark_args_tf import TensorFlowBenchmarkArguments
# pyanvml exposes NVIDIA management-library bindings — presumably used by the
# GPU memory-measurement path further down; confirm against _measure_memory.
if is_pyanvml_available():
    import pyanvml.pyanvml as nvml
# Module-level logger, shared by the benchmark methods below.
lowerCamelCase_ : List[Any] = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode, use_xla):
    """Decorator factory: wrap a benchmark callable for eager or graph execution.

    The generated source declared this as ``def UpperCAmelCase__(_UpperCAmelCase, _UpperCAmelCase)``
    (duplicate parameter names are a SyntaxError) while the decorator call sites below use
    ``run_with_tf_optimizations`` and the body reads ``do_eager_mode`` / ``use_xla`` / ``func``;
    the real names are restored here.

    Args:
        do_eager_mode: if True, return the wrapped function unchanged (eager execution).
            XLA must then be disabled.
        use_xla: if True, compile the graph-mode wrapper with XLA
            (``tf.function(experimental_compile=True)``).

    Returns:
        A decorator that wraps its argument accordingly.

    Raises:
        ValueError: if eager mode is requested together with XLA.
    """

    def run_func(func):
        # Eager path: a transparent pass-through wrapper.
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        # Graph path: compiled into a tf.function, optionally XLA-compiled.
        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.'
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


# Backward-compatible alias for the auto-generated name.
UpperCAmelCase__ = run_with_tf_optimizations
def random_input_ids(batch_size, sequence_length, vocab_size):
    """Return a ``tf.Tensor`` of shape ``(batch_size, sequence_length)`` of random token ids.

    Restored from the generated placeholder: the def carried duplicate ``_UpperCAmelCase``
    parameters (SyntaxError) while the body used ``rng`` / ``vocab_size`` / ``batch_size`` /
    ``sequence_length``, and ``tf.intaa`` is not a real dtype — it was a mangled ``tf.int32``.

    Args:
        batch_size: number of rows of the returned tensor.
        sequence_length: number of token ids per row.
        vocab_size: ids are drawn uniformly from ``[0, vocab_size - 1]``.
    """
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for _ in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)


# Backward-compatible alias for the auto-generated name.
UpperCAmelCase__ = random_input_ids
class _UpperCAmelCase ( UpperCAmelCase__ ):
    """TensorFlow implementation of the benchmark harness.

    The methods below read ``self.args`` (benchmark arguments) and ``self.config_dict``
    (per-model configurations supplied by the base class).
    """

    # Restored attribute names: the generated source annotated all three as
    # `lowercase_`, so they overwrote each other. The methods below read
    # `self.args`; `configs` / `framework` are presumably consumed by the
    # shared Benchmark base class — confirm against it.
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
@property
def lowerCamelCase_ ( self ):
"""simple docstring"""
return tf.__version__
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : str = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
A_ : int = self._prepare_inference_func(snake_case_ , snake_case_ , snake_case_ )
return self._measure_speed(_inference )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : Any = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
A_ : str = self._prepare_train_func(snake_case_ , snake_case_ , snake_case_ )
return self._measure_speed(_train )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , snake_case_ )
A_ : Any = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
A_ : Any = self._prepare_inference_func(snake_case_ , snake_case_ , snake_case_ )
return self._measure_memory(_inference )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , snake_case_ )
A_ : List[str] = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
A_ : str = self._prepare_train_func(snake_case_ , snake_case_ , snake_case_ )
return self._measure_memory(_train )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : str = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
A_ : Optional[Any] = (
hasattr(snake_case_ , 'architectures' )
and isinstance(config.architectures , snake_case_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
A_ : str = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
A_ : Optional[Any] = __import__('transformers' , fromlist=[model_class] )
A_ : int = getattr(snake_case_ , snake_case_ )
A_ : List[str] = model_cls(snake_case_ )
except ImportError:
raise ImportError(
F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
A_ : List[str] = TF_MODEL_MAPPING[config.__class__](snake_case_ )
# encoder-decoder has vocab size saved differently
A_ : Optional[int] = config.vocab_size if hasattr(snake_case_ , 'vocab_size' ) else config.encoder.vocab_size
A_ : Optional[int] = random_input_ids(snake_case_ , snake_case_ , snake_case_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(snake_case_ , decoder_input_ids=snake_case_ , training=snake_case_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(snake_case_ , training=snake_case_ )
A_ : str = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : List[Any] = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.' )
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
A_ : int = (
hasattr(snake_case_ , 'architectures' )
and isinstance(config.architectures , snake_case_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
A_ : Any = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model
A_ : Optional[Any] = __import__('transformers' , fromlist=[model_class] )
A_ : Tuple = getattr(snake_case_ , snake_case_ )
A_ : Union[str, Any] = model_cls(snake_case_ )
except ImportError:
raise ImportError(
F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
A_ : Any = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](snake_case_ )
# encoder-decoder has vocab size saved differently
A_ : Optional[int] = config.vocab_size if hasattr(snake_case_ , 'vocab_size' ) else config.encoder.vocab_size
A_ : Dict = random_input_ids(snake_case_ , snake_case_ , snake_case_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
A_ : int = model(snake_case_ , decoder_input_ids=snake_case_ , labels=snake_case_ , training=snake_case_ )[0]
A_ : Union[str, Any] = tf.gradients(snake_case_ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
A_ : Any = model(snake_case_ , labels=snake_case_ , training=snake_case_ )[0]
A_ : List[Any] = tf.gradients(snake_case_ , model.trainable_variables )
return gradients
A_ : Tuple = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('Do inference on TPU. Running model 5 times to stabilize compilation' )
timeit.repeat(snake_case_ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
A_ : List[str] = timeit.repeat(
snake_case_ , repeat=self.args.repeat , number=1_0 , )
return min(snake_case_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""" )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
logger.info(
'Note that TensorFlow allocates more memory than '
'it might need to speed up computation. '
'The memory reported here corresponds to the memory '
'reported by `nvidia-smi`, which can vary depending '
'on total available memory on the GPU that is used.' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
' consumption line by line.' )
A_ : Optional[Any] = start_memory_tracing('transformers' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
' with `args.memory=False`' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'py3nvml not installed, we won\'t log GPU memory usage. '
'Install py3nvml (pip install py3nvml) to log information about GPU.' )
A_ : Tuple = 'N/A'
else:
logger.info(
'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
' running on the same GPU.' )
# init nvml
nvml.nvmlInit()
func()
A_ : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
A_ : int = nvml.nvmlDeviceGetMemoryInfo(snake_case_ )
A_ : int = meminfo.used
A_ : int = Memory(snake_case_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
' TensorFlow.' )
A_ : Any = None
else:
A_ : List[str] = measure_peak_memory_cpu(snake_case_ )
A_ : Any = Memory(snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
A_ : List[str] = stop_memory_tracing(snake_case_ )
if memory is None:
A_ : Union[str, Any] = summary.total
else:
A_ : Any = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""" )
return "N/A", None | 286 |
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class _UpperCAmelCase ( CLIPTokenizer ):
    """CLIP tokenizer that expands a single placeholder token into several
    learned vocabulary tokens (multi-vector textual inversion).

    NOTE(review): the mangled version repeated parameter names (a
    SyntaxError), referenced undefined locals and inherited from an
    undefined alias; restored to the imported ``CLIPTokenizer`` base and to
    the identifiers the bodies actually use.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # placeholder token -> list of the concrete tokens it expands to
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        """Add ``placeholder_token`` to the vocabulary, failing loudly on duplicates."""
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
                ' `placeholder_token` that is not already in the tokenizer.')

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        """Register ``placeholder_token`` backed by ``num_vec_per_token`` vocabulary tokens."""
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"""_{i}"""
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"""The tokenizer already has placeholder token {token} that can get confused with"""
                    f""" {placeholder_token}keep placeholder tokens independent""")
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        """Expand every registered placeholder in ``text`` (str or list of str)
        into its mapped tokens, optionally shuffling / truncating them."""
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, ' '.join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        """Tokenize ``text`` after expanding placeholder tokens."""
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        """Encode ``text`` after expanding placeholder tokens."""
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load),
            *args,
            **kwargs,
        )
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : Any = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration for an X-MOD model (XLM-R with language-specific
    modular adapters); every constructor argument is stored as a
    same-named attribute.

    NOTE(review): the mangled version repeated the parameter name
    ``snake_case_`` (a SyntaxError), dropped the ``self.`` on attribute
    assignments and inherited from an undefined alias; restored to the
    imported ``PretrainedConfig`` base and the upstream argument names.
    A second class later in this module shadows this name — confirm the
    intended class names.
    """

    model_type = """xmod"""

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class _UpperCAmelCase ( OnnxConfig ):
    """ONNX export configuration for X-MOD models.

    Bug fix: the dynamic-axis dict was assigned to a throwaway name while
    the return statement read the undefined ``dynamic_axis``; the base
    class alias was also undefined and is restored to the imported
    ``OnnxConfig``.
    """

    @property
    def inputs(self):
        """Mapping of model input names to their dynamic ONNX axes."""
        # Multiple-choice inputs carry an extra `choice` dimension.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
"""simple docstring"""
def UpperCAmelCase__(hex_num):
    """Convert a hexadecimal string into an int whose decimal digits are the
    value's binary representation (e.g. "AC" -> 10101100).

    Raises ValueError for empty or invalid input.

    >>> UpperCAmelCase__("AC")
    10101100
    >>> UpperCAmelCase__("-fFfF")
    -1111111111111111
    >>> UpperCAmelCase__("0")
    0
    """
    # bug fix: the parameter had been mangled so the body read an undefined
    # name, and input "0" produced int('') -> ValueError.
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError('No value was passed to the function')
    is_negative = hex_num[0] == '-'
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError('Invalid value was passed to the function')
    bin_str = ''
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    if not bin_str:
        bin_str = '0'  # value was zero
    return int(('-' + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Public API of the package, keyed by submodule name -> exported symbols.
# NOTE(review): the mangled version bound every piece of the structure to the
# same throwaway name (losing all but the last) and never installed the lazy
# module; restored the standard `_import_structure` / `sys.modules` pattern.
_import_structure = {
    'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
    'processing_layoutlmv2': ['LayoutLMv2Processor'],
    'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_layoutlmv2_fast'] = ['LayoutLMv2TokenizerFast']

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_layoutlmv2'] = ['LayoutLMv2FeatureExtractor']
    _import_structure['image_processing_layoutlmv2'] = ['LayoutLMv2ImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_layoutlmv2'] = [
        'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LayoutLMv2ForQuestionAnswering',
        'LayoutLMv2ForSequenceClassification',
        'LayoutLMv2ForTokenClassification',
        'LayoutLMv2Layer',
        'LayoutLMv2Model',
        'LayoutLMv2PreTrainedModel',
    ]

if TYPE_CHECKING:
    # NOTE(review): these module paths use the mangled "...lmva" spelling while
    # the _import_structure keys use "...lmv2" — confirm which spelling matches
    # the actual sibling filenames.
    from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaLayer,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )
else:
    import sys

    # bug fix: the lazy module was previously assigned to a throwaway variable
    # instead of replacing this module in sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import qiskit
def UpperCAmelCase__(bita, bitb):
    """Simulate a quantum half adder for two classical input bits.

    Returns the 1000-shot measurement histogram; qubit 2 carries the XOR
    (sum) bit and qubit 3 the AND (carry) bit.

    Bug fix: the mangled signature repeated one parameter name (a
    SyntaxError) and the body referenced the undefined ``qc_ha``/``job``.
    """
    simulator = qiskit.Aer.get_backend('aer_simulator')
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0)
    if bitb == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    # bug fix: the result was bound to a mangled name but printed as `counts`
    counts = UpperCAmelCase__(1, 1)
    print(F"Half Adder Output Qubit Counts: {counts}")
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
class _UpperCAmelCase ( BaseImageProcessor ):
    """GLPN-style image processor: resizes images down to the nearest
    multiple of ``size_divisor`` and optionally rescales pixels to [0, 1].

    NOTE(review): mangled duplicate parameter names (a SyntaxError) and
    undefined body references were restored; the base class is the imported
    ``BaseImageProcessor``, and the ``preprocess`` body's ``self.resize`` /
    ``self.rescale`` calls require those method names.
    """

    model_input_names = ["""pixel_values"""]

    def __init__(self, do_resize=True, size_divisor=32, resample=PILImageResampling.BILINEAR, do_rescale=True, **kwargs):
        """Store the default preprocessing flags."""
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs):
        """Resize ``image`` so height and width are multiples of ``size_divisor``."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # `resize` below resolves to the module-level helper, not this method.
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        do_rescale=None,
        size_divisor=None,
        resample=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one image or a batch and return a ``BatchFeature``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError('size_divisor is required for resizing')

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image(s)')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : Any = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration for an X-MOD model (XLM-R with language-specific
    modular adapters); every constructor argument is stored as a
    same-named attribute.

    NOTE(review): the mangled version repeated the parameter name
    ``snake_case_`` (a SyntaxError), dropped the ``self.`` on attribute
    assignments and inherited from an undefined alias; restored to the
    imported ``PretrainedConfig`` base and the upstream argument names.
    A second class later in this module shadows this name — confirm the
    intended class names.
    """

    model_type = """xmod"""

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class _UpperCAmelCase ( OnnxConfig ):
    """ONNX export configuration for X-MOD models.

    Bug fix: the dynamic-axis dict was assigned to a throwaway name while
    the return statement read the undefined ``dynamic_axis``; the base
    class alias was also undefined and is restored to the imported
    ``OnnxConfig``.
    """

    @property
    def inputs(self):
        """Mapping of model input names to their dynamic ONNX axes."""
        # Multiple-choice inputs carry an extra `choice` dimension.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
"""simple docstring"""
from __future__ import annotations
from math import pi
def UpperCAmelCase__(inductance, frequency, reactance):
    """Solve the inductive reactance relation X_L = 2*pi*f*L for whichever
    quantity is passed as 0, returning it in a one-entry dict.

    Raises ValueError unless exactly one argument is 0, or if any argument
    is negative.

    Bug fix: the mangled signature repeated a single parameter name three
    times (a SyntaxError); the body's names were restored.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if inductance < 0:
        raise ValueError('Inductance cannot be negative')
    if frequency < 0:
        raise ValueError('Frequency cannot be negative')
    if reactance < 0:
        raise ValueError('Inductive reactance cannot be negative')
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        # unreachable given the count-of-zero check above; kept defensively
        raise ValueError('Exactly one argument must be 0')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( ProcessorMixin ):
    """Bundles a ViLT image processor and a BERT tokenizer into a single
    processor: text goes to the tokenizer, images to the image processor,
    and the two encodings are merged.

    NOTE(review): mangled duplicate parameter names (a SyntaxError),
    undefined body references and an undefined base alias were restored;
    the base class is the imported ``ProcessorMixin`` and the class-level
    attribute names follow its protocol.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = """ViltImageProcessor"""
    tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        # fall back to the deprecated argument when no image processor is given
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        """Tokenize ``text`` and process ``images``; return one merged encoding."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Deduplicated union of the tokenizer's and image processor's input names."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital(num):
    """Return True if ``num``'s digits are exactly 1..9, each used once."""
    digits = str(num)
    return len(digits) == 9 and set(digits) == set('123456789')


def UpperCAmelCase__():
    """Project Euler 38: largest 1-9 pandigital concatenated product.

    Bug fix: both functions previously shared one mangled name (so the
    helper call raised NameError) and the helper measured ``len`` of the
    int instead of its string form.
    """
    # candidates of the form 100002 * n == concat(n, 2n) for 4-digit n
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    # candidates of the form 1002003 * n == concat(n, 2n, 3n) for 3-digit n
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(F"{UpperCAmelCase__() = }")
"""simple docstring"""
from copy import deepcopy
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self , snake_case_ = None , snake_case_ = None ):
"""simple docstring"""
if arr is None and size is not None:
A_ : Union[str, Any] = size
A_ : List[str] = [0] * size
elif arr is not None:
self.init(snake_case_ )
else:
raise ValueError('Either arr or size must be specified' )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
A_ : Union[str, Any] = len(snake_case_ )
A_ : Optional[int] = deepcopy(snake_case_ )
for i in range(1 , self.size ):
A_ : Optional[Any] = self.next_(snake_case_ )
if j < self.size:
self.tree[j] += self.tree[i]
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : int = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
A_ : Optional[int] = self.next_(snake_case_ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def lowerCamelCase_ ( snake_case_ ):
"""simple docstring"""
return index + (index & (-index))
@staticmethod
def lowerCamelCase_ ( snake_case_ ):
"""simple docstring"""
return index - (index & (-index))
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
A_ : List[str] = self.next_(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
self.add(snake_case_ , value - self.get(snake_case_ ) )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
if right == 0:
return 0
A_ : Any = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
A_ : Tuple = self.prev(snake_case_ )
return result
def lowerCamelCase_ ( self , snake_case_ , snake_case_ ):
"""simple docstring"""
return self.prefix(snake_case_ ) - self.prefix(snake_case_ )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
return self.query(snake_case_ , index + 1 )
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
value -= self.tree[0]
if value < 0:
return -1
A_ : List[Any] = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
A_ : Tuple = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod() | 286 | 1 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__(nums):
    """Return True when the side lengths in ``nums`` can close into a
    polygon (longest side strictly shorter than the sum of the others).

    Raises ValueError for fewer than two sides or any non-positive side.

    Bug fix: the mangled parameter name left ``nums`` undefined in the body.
    """
    if len(nums) < 2:
        raise ValueError('Monogons and Digons are not polygons in the Euclidean space')
    if any(i <= 0 for i in nums):
        raise ValueError('All values must be greater than 0')
    copy_nums = nums.copy()  # do not mutate the caller's list
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _UpperCAmelCase ( ModelMixin , ConfigMixin ):
    """Holds the mean/std statistics used to (de)normalize CLIP image
    embeddings.

    NOTE(review): the mangled version listed the same undefined base alias
    twice (an MRO TypeError), gave all three methods the same name and
    referenced the undefined ``embeds``; restored to the imported mixins
    and distinct method names.
    """

    @register_to_config
    def __init__(self, embedding_dim=768):
        """Create (1, embedding_dim) mean/std parameters (zeros/ones)."""
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device=None, torch_dtype=None):
        """Move/cast the statistics; returns self for chaining."""
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        """Normalize embeddings to zero mean / unit std."""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        """Invert :meth:`scale`."""
        embeds = (embeds * self.std) + self.mean
        return embeds
"""simple docstring"""
def UpperCAmelCase__(num):
    """Return the binary representation of an integer as a '0b'-prefixed string.

    Raises TypeError for floats and strings.

    >>> UpperCAmelCase__(0)
    '0b0'
    >>> UpperCAmelCase__(5)
    '0b101'
    >>> UpperCAmelCase__(-5)
    '-0b101'
    """
    # bug fix: the mangled version called isinstance(num, num) — a runtime
    # TypeError — and stringified the whole argument instead of each bit.
    if isinstance(num, float):
        raise TypeError('\'float\' object cannot be interpreted as an integer')
    if isinstance(num, str):
        raise TypeError('\'str\' object cannot be interpreted as an integer')
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
# NOTE(review): every object below was previously bound to the same mangled
# name, so later lines referenced undefined `args`/`tokenizer`/`config`/`model`;
# distinct names restored.

# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    'vocab_size': len(tokenizer),
    'scale_attn_by_inverse_layer_idx': True,
    'reorder_and_upcast_attn': True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.