code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import inspect
import unittest
class lowerCAmelCase_ ( unittest.TestCase):
    """Dependency sanity checks for the `diffusers` package.

    Verifies that `diffusers` is importable and that every backend required by
    a dummy placeholder object is listed in the dependency-versions table.
    """

    def _snake_case(self) -> None:
        """Assert that the `diffusers` package can be imported at all."""
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def _snake_case(self) -> None:  # NOTE(review): shadows the method above (same mangled name)
        """Every backend referenced by a dummy object must appear in `deps`."""
        import diffusers
        from diffusers.dependency_versions_table import deps

        # Fix: the original called inspect.getmembers(__UpperCamelCase, ...) on an
        # undefined name and bound the result to `a__` while the loop read
        # `all_classes`; both now use the imported `diffusers` module.
        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            # Dummy objects live in `dummy_*` modules and record the backends
            # they stand in for on a `_backends` attribute.
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # Module names differ from the PyPI names used as deps keys.
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 395 |
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

# Versatile Diffusion requires both PyTorch and transformers >= 4.25.0; when
# either is missing, export dummy objects that raise a helpful error on use.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Importable placeholders that fail loudly when instantiated.
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    # Real implementations.
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 0 |
'''Download a Twitter account's recent tweets to a CSV file via tweepy.'''
import csv
import tweepy
# Twitter API credentials
# NOTE(review): all four credential slots below bind the same mangled name, so
# only the last assignment survives; presumably these were consumer_key /
# consumer_secret / access_key / access_secret — restore before use, and never
# commit real secrets to source control.
_UpperCamelCase : Dict = ""
_UpperCamelCase : Optional[Any] = ""
_UpperCamelCase : int = ""
_UpperCamelCase : Union[str, Any] = ""
def snake_case ( snake_case : str ) -> None:
    """Download up to the ~3200 most recent tweets of an account and write them
    to ``new_<screen_name>_tweets.csv`` with columns id, created_at, text.

    NOTE(review): the parameter shadows the function name, and every local
    below binds the mangled name ``lowerCAmelCase`` while later lines read
    ``auth``/``api``/``alltweets``/``oldest``/``outtweets``/``writer``; the
    OAuth call also passes the screen name for both credential slots. Restore
    the intended identifiers (and real credentials) before running.
    """
    lowerCAmelCase = tweepy.OAuthHandler(snake_case , snake_case )
    auth.set_access_token(snake_case , snake_case )
    lowerCAmelCase = tweepy.API(snake_case )
    # initialize a list to hold all the tweepy Tweets
    lowerCAmelCase = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    lowerCAmelCase = api.user_timeline(screen_name=snake_case , count=200 )
    # save most recent tweets
    alltweets.extend(snake_case )
    # save the id of the oldest tweet less one
    lowerCAmelCase = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(snake_case ) > 0:
        print(F'getting tweets before {oldest}' )
        # all subsequent requests use the max_id param to prevent duplicates
        lowerCAmelCase = api.user_timeline(
            screen_name=snake_case , count=200 , max_id=snake_case )
        # save most recent tweets
        alltweets.extend(snake_case )
        # update the id of the oldest tweet less one
        lowerCAmelCase = alltweets[-1].id - 1
        print(F'...{len(snake_case )} tweets downloaded so far' )
    # transform the tweepy tweets into a 2D array that will populate the csv
    lowerCAmelCase = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    # NOTE(review): for the csv module, open with newline="" and an explicit
    # encoding to avoid blank rows on Windows — confirm before changing.
    with open(F'new_{screen_name}_tweets.csv' , 'w' ) as f:
        lowerCAmelCase = csv.writer(snake_case )
        writer.writerow(['id', 'created_at', 'text'] )
        writer.writerows(snake_case )
if __name__ == "__main__":
    # pass in the username of the account you want to download
    # Fix: the original called an undefined `get_all_tweets`; the downloader
    # defined above in this file is named `snake_case`.
    snake_case("FirePing32")
| 514 |
'''simple docstring'''
def snake_case(input_str: str) -> str:
    """Return *input_str* with its whitespace-separated words in reverse order.

    Runs of whitespace collapse to single spaces and leading/trailing
    whitespace is dropped (behavior of ``str.split`` with no arguments).

    >>> snake_case("Hello World")
    'World Hello'
    """
    # Fix: the original parameter was mangled to `snake_case` (shadowing the
    # function) while the body read `input_str`, raising NameError at call time.
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 514 | 1 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( _A ):
    """Tests for ``CMStochasticIterativeScheduler`` (consistency-model sampling).

    NOTE(review): every local assignment in this class binds the mangled name
    ``__magic_name__`` while the following lines read ``config``, ``scheduler``,
    ``sample``, ``output_a``, ``result_sum`` etc., and all test methods share
    the mangled name ``_lowercase`` (each shadows the previous). Restore the
    intended names before these tests can execute.
    """

    # Scheduler class(es) exercised by the shared SchedulerCommonTest helpers.
    a__ = (CMStochasticIterativeScheduler,)
    # NOTE(review): rebinds ``a__`` above — presumably ``num_inference_steps``.
    a__ = 10

    def _lowercase ( self : Any , **UpperCamelCase__ : List[str] ) -> Dict:
        """Return a baseline scheduler config, overridable via keyword args."""
        __magic_name__ = {
            """num_train_timesteps""": 201,
            """sigma_min""": 0.002,
            """sigma_max""": 80.0,
        }
        config.update(**UpperCamelCase__ )
        return config

    def _lowercase ( self : List[Any] ) -> Any:
        """Two consecutive `step` calls keep the sample shape unchanged."""
        __magic_name__ = 10
        __magic_name__ = self.get_scheduler_config()
        __magic_name__ = self.scheduler_classes[0](**UpperCamelCase__ )
        scheduler.set_timesteps(UpperCamelCase__ )
        __magic_name__ = scheduler.timesteps[0]
        __magic_name__ = scheduler.timesteps[1]
        __magic_name__ = self.dummy_sample
        __magic_name__ = 0.1 * sample
        __magic_name__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
        __magic_name__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
        self.assertEqual(output_a.shape , sample.shape )
        self.assertEqual(output_a.shape , output_a.shape )

    def _lowercase ( self : Dict ) -> Tuple:
        """Config round-trip across several training-timestep counts."""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=UpperCamelCase__ )

    def _lowercase ( self : List[Any] ) -> str:
        """Config round-trip for both clip_denoised settings."""
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=UpperCamelCase__ )

    def _lowercase ( self : Union[str, Any] ) -> List[str]:
        """Full denoising loop over the default schedule; checks golden values."""
        __magic_name__ = self.scheduler_classes[0]
        __magic_name__ = self.get_scheduler_config()
        __magic_name__ = scheduler_class(**UpperCamelCase__ )
        __magic_name__ = 1
        scheduler.set_timesteps(UpperCamelCase__ )
        __magic_name__ = scheduler.timesteps
        __magic_name__ = torch.manual_seed(0 )
        __magic_name__ = self.dummy_model()
        __magic_name__ = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(UpperCamelCase__ ):
            # 1. scale model input
            __magic_name__ = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
            # 2. predict noise residual
            __magic_name__ = model(UpperCamelCase__ , UpperCamelCase__ )
            # 3. predict previous sample x_t-1
            __magic_name__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
            __magic_name__ = pred_prev_sample
        __magic_name__ = torch.sum(torch.abs(UpperCamelCase__ ) )
        __magic_name__ = torch.mean(torch.abs(UpperCamelCase__ ) )
        # Golden values for torch.manual_seed(0).
        assert abs(result_sum.item() - 192.7614 ) < 1E-2
        assert abs(result_mean.item() - 0.2510 ) < 1E-3

    def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
        """Denoising with an explicit custom timestep schedule [106, 0]."""
        __magic_name__ = self.scheduler_classes[0]
        __magic_name__ = self.get_scheduler_config()
        __magic_name__ = scheduler_class(**UpperCamelCase__ )
        __magic_name__ = [106, 0]
        scheduler.set_timesteps(timesteps=UpperCamelCase__ )
        __magic_name__ = scheduler.timesteps
        __magic_name__ = torch.manual_seed(0 )
        __magic_name__ = self.dummy_model()
        __magic_name__ = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            __magic_name__ = scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
            # 2. predict noise residual
            __magic_name__ = model(UpperCamelCase__ , UpperCamelCase__ )
            # 3. predict previous sample x_t-1
            __magic_name__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
            __magic_name__ = pred_prev_sample
        __magic_name__ = torch.sum(torch.abs(UpperCamelCase__ ) )
        __magic_name__ = torch.mean(torch.abs(UpperCamelCase__ ) )
        assert abs(result_sum.item() - 347.6357 ) < 1E-2
        assert abs(result_mean.item() - 0.4527 ) < 1E-3

    def _lowercase ( self : Tuple ) -> str:
        """Non-descending custom timesteps must be rejected."""
        __magic_name__ = self.scheduler_classes[0]
        __magic_name__ = self.get_scheduler_config()
        __magic_name__ = scheduler_class(**UpperCamelCase__ )
        __magic_name__ = [39, 30, 12, 15, 0]
        with self.assertRaises(UpperCamelCase__ , msg="""`timesteps` must be in descending order.""" ):
            scheduler.set_timesteps(timesteps=UpperCamelCase__ )

    def _lowercase ( self : Optional[int] ) -> Any:
        """Passing both num_inference_steps and timesteps must be rejected."""
        __magic_name__ = self.scheduler_classes[0]
        __magic_name__ = self.get_scheduler_config()
        __magic_name__ = scheduler_class(**UpperCamelCase__ )
        __magic_name__ = [39, 30, 12, 1, 0]
        __magic_name__ = len(UpperCamelCase__ )
        with self.assertRaises(UpperCamelCase__ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
            scheduler.set_timesteps(num_inference_steps=UpperCamelCase__ , timesteps=UpperCamelCase__ )

    def _lowercase ( self : List[Any] ) -> Optional[int]:
        """Timesteps at/above num_train_timesteps must be rejected."""
        __magic_name__ = self.scheduler_classes[0]
        __magic_name__ = self.get_scheduler_config()
        __magic_name__ = scheduler_class(**UpperCamelCase__ )
        __magic_name__ = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            UpperCamelCase__ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
            scheduler.set_timesteps(timesteps=UpperCamelCase__ )
| 529 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( _A ):
    """Processor combining a BridgeTower image processor with a Roberta tokenizer
    into a single ``__call__`` that returns a joint text+image ``BatchEncoding``.

    NOTE(review): the three class attributes below all bind the mangled name
    ``a__`` (presumably ``attributes`` / ``image_processor_class`` /
    ``tokenizer_class``), and the mangled locals in ``__call__`` and the
    ``model_input_names`` property read undefined names — restore before use.
    """

    a__ = ["""image_processor""", """tokenizer"""]
    a__ = """BridgeTowerImageProcessor"""
    a__ = ("""RobertaTokenizer""", """RobertaTokenizerFast""")

    def __init__( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any ) -> Optional[int]:
        """Register the image processor and tokenizer on the ProcessorMixin."""
        super().__init__(UpperCamelCase__ , UpperCamelCase__ )

    def __call__( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Dict , ) -> BatchEncoding:
        """Tokenize the text and preprocess the images, merging both into one
        encoding (tokenizer fields plus pixel_values / pixel_mask).

        NOTE(review): every parameter shares the mangled name
        ``UpperCamelCase__`` — duplicate parameter names are a SyntaxError.
        Presumably these were images, text, add_special_tokens, padding,
        truncation, max_length, stride, pad_to_multiple_of, and the
        return_* / verbose / return_tensors flags forwarded below — verify
        against the upstream processor before restoring.
        """
        __magic_name__ = self.tokenizer(
            text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
        # add pixel_values + pixel_mask
        __magic_name__ = self.image_processor(
            UpperCamelCase__ , return_tensors=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_center_crop=UpperCamelCase__ , **UpperCamelCase__ )
        encoding.update(UpperCamelCase__ )
        return encoding

    def _lowercase ( self : Optional[Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : str ) -> str:
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )

    def _lowercase ( self : Dict , *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[Any] ) -> str:
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )

    @property
    def _lowercase ( self : Tuple ) -> Union[str, Any]:
        """Union of tokenizer and image-processor input names, de-duplicated
        while preserving order (dict.fromkeys)."""
        __magic_name__ = self.tokenizer.model_input_names
        __magic_name__ = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 529 | 1 |
"""simple docstring"""
import argparse
import os
import re
# Directory containing the auto-mapping modules to sort.
__UpperCAmelCase = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
__UpperCAmelCase = re.compile(R'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
# NOTE(review): all three module constants here bind the same mangled name
# `__UpperCAmelCase`; the functions below read `_re_intro_mapping` and
# `_re_identifier`, which are undefined under those names in this file.
__UpperCAmelCase = re.compile(R'''\s*\(\s*"(\S[^"]+)"''')
def lowercase__ ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : bool = False ) -> Dict:
    """Sort the OrderedDict entries of every auto-mapping found in one file.

    NOTE(review): (1) both parameters bind the same mangled name
    ``lowerCAmelCase__`` — duplicate parameter names are a SyntaxError — and
    (2) every local is assigned to ``a__`` while later lines read ``content``/
    ``lines``/``new_lines``/``line_idx``/``indent``/``blocks``/``start_idx``.
    Presumably the parameters were ``fname`` and ``overwrite``; with
    ``overwrite`` False the function returns True when the file needs sorting
    (and implicitly None otherwise).
    """
    with open(lowerCAmelCase__ , "r" , encoding="utf-8" ) as f:
        a__ : Union[str, Any] = f.read()
    a__ : List[Any] = content.split("\n" )
    a__ : Dict = []
    a__ : List[Any] = 0
    while line_idx < len(lowerCAmelCase__ ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            # Entries are indented 8 spaces past the mapping introduction.
            a__ : int = len(re.search(R"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "(" ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            a__ : Dict = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    a__ : Optional[int] = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")" ):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                line_idx += 1
            # Sort blocks by their identifiers
            a__ : str = sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : _re_identifier.search(lowerCAmelCase__ ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as f:
            f.write("\n".join(lowerCAmelCase__ ) )
    elif "\n".join(lowerCAmelCase__ ) != content:
        return True
def lowercase__ ( lowerCAmelCase__ : bool = False ) -> Optional[Any]:
    """Sort every auto-mapping file in the auto-models directory, raising when
    ``overwrite`` is False and any file needs sorting.

    NOTE(review): shadows the function above (same mangled name); the body
    calls ``sort_auto_mapping`` and reads ``fnames``/``results``/``failures``,
    none of which are defined under those names in this file — restore the
    original identifiers before running.
    """
    a__ : str = [os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) for f in os.listdir(lowerCAmelCase__ ) if f.endswith(".py" )]
    a__ : Optional[int] = [sort_auto_mapping(lowerCAmelCase__ , overwrite=lowerCAmelCase__ ) for fname in fnames]
    if not overwrite and any(lowerCAmelCase__ ):
        a__ : List[str] = [f for f, d in zip(lowerCAmelCase__ , lowerCAmelCase__ ) if d]
        raise ValueError(
            F"The following files have auto mappings that need sorting: {', '.join(lowerCAmelCase__ )}. Run `make style` to fix"
            " this." )
if __name__ == "__main__":
    # CLI: `--check_only` reports unsorted mappings instead of rewriting files.
    # NOTE(review): `parser` and `args` are read below but the assignments bind
    # the mangled name `__UpperCAmelCase` — restore before running.
    __UpperCAmelCase = argparse.ArgumentParser()
    parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    __UpperCAmelCase = parser.parse_args()
sort_all_auto_mappings(not args.check_only) | 251 |
"""simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
__UpperCAmelCase = datasets.utils.logging.get_logger(__name__)
@dataclass
class __UpperCAmelCase ( datasets.BuilderConfig ):
    """BuilderConfig for the JSON dataset loader.

    NOTE(review): all eight fields below bind the same mangled name
    ``__lowerCamelCase`` (only the last binding survives); the builder below
    reads ``features`` / ``encoding`` / ``encoding_errors`` / ``field`` /
    ``use_threads`` / ``block_size`` / ``chunksize`` / ``newlines_in_values``
    — restore the field names before use.
    """

    __lowerCamelCase : Optional[datasets.Features] = None
    __lowerCamelCase : str = "utf-8"
    __lowerCamelCase : Optional[str] = None
    __lowerCamelCase : Optional[str] = None
    __lowerCamelCase : bool = True  # deprecated
    __lowerCamelCase : Optional[int] = None  # deprecated
    __lowerCamelCase : int = 10 << 20  # 10MB
    __lowerCamelCase : Optional[bool] = None
class __UpperCAmelCase ( datasets.ArrowBasedBuilder ):
    """Arrow-based JSON dataset builder (JSON Lines, or one JSON document per
    file, optionally restricted to one top-level field).

    NOTE(review): shadows the config class above (same mangled name); local
    assignments throughout bind the mangled name ``a__`` while later lines read
    ``data_files``/``files``/``splits``/``dataset``/``keys``/``mapping``/
    ``batch``/``block_size``/``batch_idx`` etc., and ``logger``/``dl_manager``/
    ``pa_table`` are not defined under those names here. Restore the intended
    identifiers before use.
    """

    # Config class used for the builder's parameters.
    __lowerCamelCase : Dict = JsonConfig

    def UpperCAmelCase ( self : Tuple ) -> Dict:
        """Validate deprecated config options and build the DatasetInfo."""
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" )
            a__ : Union[str, Any] = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" )
        return datasets.DatasetInfo(features=self.config.features )

    def UpperCAmelCase ( self : str , a_ : List[str] ) -> List[str]:
        """Resolve configured data files into one SplitGenerator per split."""
        if not self.config.data_files:
            raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
        a__ : List[str] = dl_manager.download_and_extract(self.config.data_files )
        # Single path / flat list: everything goes into the train split.
        if isinstance(a_ , (str, list, tuple) ):
            a__ : List[Any] = data_files
            if isinstance(a_ , a_ ):
                a__ : List[Any] = [files]
            a__ : List[str] = [dl_manager.iter_files(a_ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        a__ : Tuple = []
        for split_name, files in data_files.items():
            if isinstance(a_ , a_ ):
                a__ : List[str] = [files]
            a__ : int = [dl_manager.iter_files(a_ ) for file in files]
            splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={"files": files} ) )
        return splits

    def UpperCAmelCase ( self : Tuple , a_ : pa.Table ) -> pa.Table:
        """Cast a table to the configured features, adding missing columns as nulls."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                a__ : Optional[int] = self.config.features.arrow_schema.field(a_ ).type
                a__ : int = pa_table.append_column(a_ , pa.array([None] * len(a_ ) , type=a_ ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            a__ : Optional[int] = table_cast(a_ , self.config.features.arrow_schema )
        return pa_table

    def UpperCAmelCase ( self : Union[str, Any] , a_ : Dict ) -> Any:
        """Yield (key, Arrow table) pairs parsed from each input file."""
        for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(a_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    a__ : Dict = json.load(a_ )
                # We keep only the field we are interested in
                a__ : Tuple = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(a_ , (list, tuple) ):
                    a__ : int = set().union(*[row.keys() for row in dataset] )
                    a__ : str = {col: [row.get(a_ ) for row in dataset] for col in keys}
                else:
                    a__ : List[str] = dataset
                a__ : List[str] = pa.Table.from_pydict(a_ )
                yield file_idx, self._cast_table(a_ )
            # If the file has one json object per line
            else:
                with open(a_ , "rb" ) as f:
                    a__ : Union[str, Any] = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    a__ : List[str] = max(self.config.chunksize // 32 , 16 << 10 )
                    a__ : str = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        a__ : List[str] = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(a_ )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            a__ : str = batch.decode(self.config.encoding , errors=a_ ).encode("utf-8" )
                        try:
                            while True:
                                try:
                                    a__ : List[str] = paj.read_json(
                                        io.BytesIO(a_ ) , read_options=paj.ReadOptions(block_size=a_ ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(a_ , pa.ArrowInvalid )
                                        and "straddling" not in str(a_ )
                                        or block_size > len(a_ )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            F"Batch of {len(a_ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}." )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    a_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    a__ : Any = json.load(a_ )
                            except json.JSONDecodeError:
                                logger.error(F"Failed to read file '{file}' with error {type(a_ )}: {e}" )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(a_ , a_ ):  # list is the only sequence type supported in JSON
                                try:
                                    a__ : Optional[int] = set().union(*[row.keys() for row in dataset] )
                                    a__ : int = {col: [row.get(a_ ) for row in dataset] for col in keys}
                                    a__ : int = pa.Table.from_pydict(a_ )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(F"Failed to read file '{file}' with error {type(a_ )}: {e}" )
                                    raise ValueError(F"Not able to read records in the JSON file at {file}." ) from None
                                yield file_idx, self._cast_table(a_ )
                                break
                            else:
                                logger.error(F"Failed to read file '{file}' with error {type(a_ )}: {e}" )
                                raise ValueError(
                                    F"Not able to read records in the JSON file at {file}. "
                                    F"You should probably indicate the field of the JSON file containing your records. "
                                    F"This JSON file contain the following fields: {str(list(dataset.keys() ) )}. "
                                    F"Select the correct one and provide it as `field='XXX'` to the dataset loading method. " ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(a_ )
                        batch_idx += 1
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def lowerCamelCase__(key, default=False):
    """Read environment variable *key* as a boolean flag.

    Returns *default* unchanged when the variable is unset; otherwise returns
    1 for truthy values (y/yes/t/true/on/1) and 0 for falsy ones
    (n/no/f/false/off/0), mirroring the old ``distutils.util.strtobool``.

    Raises:
        ValueError: if the variable is set to an unrecognized value.

    Fixes: the original signature declared the same mangled parameter name
    twice (a SyntaxError) and returned the never-assigned ``_value``; it also
    relied on ``distutils``, which was removed in Python 3.12 — the conversion
    is now done inline.
    """
    try:
        raw = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        value = default
    else:
        # KEY is set, convert it to 1 or 0.
        lowered = raw.lower()
        if lowered in ("y", "yes", "t", "true", "on", "1"):
            value = 1
        elif lowered in ("n", "no", "f", "false", "off", "0"):
            value = 0
        else:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F'''If set, {key} must be yes or no.''')
    return value
# Environment flags controlling which test categories run, plus shared pytest
# skip markers for optional dependencies.
# NOTE(review): every module-level name below binds the same mangled `_a`
# (only the last assignment survives), and `parse_flag_from_env` is not
# defined under that name in this file (the flag parser above is mangled to
# `lowerCamelCase__`) — restore the intended names before use.
_a = parse_flag_from_env("""RUN_SLOW""", default=False)
_a = parse_flag_from_env("""RUN_REMOTE""", default=False)
_a = parse_flag_from_env("""RUN_LOCAL""", default=True)
_a = parse_flag_from_env("""RUN_PACKAGED""", default=True)
# Compression
_a = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
_a = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
_a = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")
# Audio
_a = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
    reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)
# Beam
_a = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
    reason="""test requires apache-beam and a compatible dill version""",
)
# Dill-cloudpickle compatibility
_a = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("""0.3.2"""),
    reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)
# Windows
_a = pytest.mark.skipif(
    sys.platform == """win32""",
    reason="""test should not be run on Windows""",
)
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
    """Skip decorator: run the test only if `faiss` is installed.

    NOTE(review): in each decorator of this group the skip-wrapped case is
    bound to the mangled name ``_UpperCamelCase`` and the function returns the
    undefined ``test_case`` — presumably the parameter was named ``test_case``
    and reassigned in place. The same applies to every helper below, all of
    which share (and shadow) the mangled name ``lowerCamelCase__``.
    """
    try:
        import faiss # noqa
    except ImportError:
        _UpperCamelCase = unittest.skip('''test requires faiss''' )(__snake_case )
    return test_case


def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]:
    """Skip decorator: run the test only if `regex` is installed."""
    try:
        import regex # noqa
    except ImportError:
        _UpperCamelCase = unittest.skip('''test requires regex''' )(__snake_case )
    return test_case


def lowerCamelCase__ ( __snake_case ) -> Tuple:
    """Skip decorator: run the test only if `elasticsearch` is installed."""
    try:
        import elasticsearch # noqa
    except ImportError:
        _UpperCamelCase = unittest.skip('''test requires elasticsearch''' )(__snake_case )
    return test_case


def lowerCamelCase__ ( __snake_case ) -> Dict:
    """Skip decorator: run the test only if `sqlalchemy` is installed."""
    try:
        import sqlalchemy # noqa
    except ImportError:
        _UpperCamelCase = unittest.skip('''test requires sqlalchemy''' )(__snake_case )
    return test_case


def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
    """Skip decorator: run the test only if PyTorch is available (per config)."""
    if not config.TORCH_AVAILABLE:
        _UpperCamelCase = unittest.skip('''test requires PyTorch''' )(__snake_case )
    return test_case


def lowerCamelCase__ ( __snake_case ) -> Any:
    """Skip decorator: run the test only if TensorFlow is available."""
    if not config.TF_AVAILABLE:
        _UpperCamelCase = unittest.skip('''test requires TensorFlow''' )(__snake_case )
    return test_case


def lowerCamelCase__ ( __snake_case ) -> Dict:
    """Skip decorator: run the test only if JAX is available."""
    if not config.JAX_AVAILABLE:
        _UpperCamelCase = unittest.skip('''test requires JAX''' )(__snake_case )
    return test_case


def lowerCamelCase__ ( __snake_case ) -> List[str]:
    """Skip decorator: run the test only if Pillow is available."""
    if not config.PIL_AVAILABLE:
        _UpperCamelCase = unittest.skip('''test requires Pillow''' )(__snake_case )
    return test_case
def lowerCamelCase__ ( __snake_case ) -> List[Any]:
    """Skip decorator: requires `transformers`.

    NOTE(review): the success branch of each decorator in this group returns
    the undefined ``test_case`` — presumably the parameter, mangled to
    ``__snake_case``. Each definition also shadows the previous one (shared
    mangled name).
    """
    try:
        import transformers # noqa F401
    except ImportError:
        return unittest.skip('''test requires transformers''' )(__snake_case )
    else:
        return test_case


def lowerCamelCase__ ( __snake_case ) -> Optional[int]:
    """Skip decorator: requires `tiktoken`."""
    try:
        import tiktoken # noqa F401
    except ImportError:
        return unittest.skip('''test requires tiktoken''' )(__snake_case )
    else:
        return test_case


def lowerCamelCase__ ( __snake_case ) -> Tuple:
    """Skip decorator: requires `spacy`."""
    try:
        import spacy # noqa F401
    except ImportError:
        return unittest.skip('''test requires spacy''' )(__snake_case )
    else:
        return test_case
def lowerCamelCase__(model):
    """Decorator factory: skip the test unless spacy *and* the named spacy
    model are available.

    Args:
        model: name of the spacy model to load (e.g. "en_core_web_sm").

    Returns:
        A decorator that returns the test unchanged when the model loads, or
        a skip-wrapped test when spacy is missing (ImportError) or the model
        is not installed (OSError).

    Fixes: the original inner function's parameter was mangled to the same
    name as the outer model argument (shadowing it, so ``spacy.load`` was
    handed the test function), and the success branch returned the undefined
    ``test_case``.
    """
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model
def lowerCamelCase__ ( __snake_case ) -> str:
    """Skip decorator: requires `pyspark`.

    NOTE(review): the success branch returns the undefined ``test_case`` —
    presumably the parameter, mangled to ``__snake_case``.
    """
    try:
        import pyspark # noqa F401
    except ImportError:
        return unittest.skip('''test requires pyspark''' )(__snake_case )
    else:
        return test_case


def lowerCamelCase__ ( __snake_case ) -> Dict:
    """Skip decorator: requires `joblibspark` (shadows the decorator above)."""
    try:
        import joblibspark # noqa F401
    except ImportError:
        return unittest.skip('''test requires joblibspark''' )(__snake_case )
    else:
        return test_case
def lowerCamelCase__ ( __snake_case ) -> Any:
    """Skip decorator honoring the RUN_SLOW environment flag.

    NOTE(review): each decorator in this group reads an undefined
    ``_run_*_tests`` flag (the module-level flags above all bind the mangled
    name ``_a``), binds the skip result to the mangled ``_UpperCamelCase``,
    and returns the undefined ``test_case`` — restore the names before use.
    """
    if not _run_slow_tests or _run_slow_tests == 0:
        _UpperCamelCase = unittest.skip('''test is slow''' )(__snake_case )
    return test_case


def lowerCamelCase__ ( __snake_case ) -> Optional[int]:
    """Skip decorator honoring the RUN_LOCAL environment flag."""
    if not _run_local_tests or _run_local_tests == 0:
        _UpperCamelCase = unittest.skip('''test is local''' )(__snake_case )
    return test_case


def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
    """Skip decorator honoring the RUN_PACKAGED environment flag."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        _UpperCamelCase = unittest.skip('''test is packaged''' )(__snake_case )
    return test_case


def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
    """Skip decorator honoring the RUN_REMOTE environment flag."""
    if not _run_remote_tests or _run_remote_tests == 0:
        _UpperCamelCase = unittest.skip('''test requires remote''' )(__snake_case )
    return test_case
def lowerCamelCase__ ( *__snake_case ) -> str:
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(__snake_case ) and name.startswith('''test''' ):
for decorator in decorators:
_UpperCamelCase = decorator(__snake_case )
setattr(cls, __snake_case, __snake_case )
return cls
return decorate
class _UpperCAmelCase( lowerCamelCase ):
    # NOTE(review): the base-class name `lowerCamelCase` is mangled/undefined
    # here; from the offline simulator below this is presumably the
    # "request would hang indefinitely" error type — verify before restoring.
    pass


class _UpperCAmelCase( lowerCamelCase ):
    # NOTE(review): shadows the class above (same mangled name). The offline
    # context manager below reads OfflineSimulationMode.CONNECTION_FAILS /
    # CONNECTION_TIMES_OUT / HF_DATASETS_OFFLINE_SET_TO_1, so these three
    # values (all rebinding the mangled name `lowercase__`) are presumably
    # those enum members.
    lowercase__ = 0
    lowercase__ = 1
    lowercase__ = 2
@contextmanager
def lowerCamelCase__ ( __snake_case=OfflineSimulationMode.CONNECTION_FAILS, __snake_case=1e-16 ) -> str:
    """Simulate an offline environment in one of three modes: requests raising
    ConnectionError, requests timing out, or HF_DATASETS_OFFLINE patched to 1.

    NOTE(review): the two parameters share the mangled name ``__snake_case``
    (duplicate parameter names are a SyntaxError; presumably ``mode`` and
    ``timeout``), and the inner helpers read several undefined mangled names
    (``url``, ``kwargs``, ``online_request``, ``max_retry_error``) — restore
    the original identifiers before use.
    """
    _UpperCamelCase = requests.Session().request

    def timeout_request(__snake_case, __snake_case, __snake_case, **__snake_case ):
        # Change the url to an invalid url so that the connection hangs
        _UpperCamelCase = '''https://10.255.255.1'''
        if kwargs.get('''timeout''' ) is None:
            raise RequestWouldHangIndefinitelyError(
                F'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''' )
        _UpperCamelCase = timeout
        try:
            return online_request(__snake_case, __snake_case, **__snake_case )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            _UpperCamelCase = url
            _UpperCamelCase = e.args[0]
            _UpperCamelCase = (max_retry_error.args[0].replace('''10.255.255.1''', F'''OfflineMock[{url}]''' ),)
            _UpperCamelCase = (max_retry_error,)
            raise

    def raise_connection_error(__snake_case, __snake_case, **__snake_case ):
        raise requests.ConnectionError('''Offline mode is enabled.''', request=__snake_case )

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('''requests.Session.send''', __snake_case ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('''requests.Session.request''', __snake_case ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('''datasets.config.HF_DATASETS_OFFLINE''', __snake_case ):
            yield
    else:
        raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' )
@contextmanager
def lowerCamelCase__(*args, **kwargs):
    """Create a temporary directory, chdir into it for the duration of the
    context, and always restore the previous working directory on exit.

    Extra positional/keyword arguments are forwarded to
    ``tempfile.TemporaryDirectory``.

    Fixes: the original signature used the same mangled name for ``*args`` and
    ``**kwargs`` (a SyntaxError), and both ``os.chdir`` calls referenced the
    mangled varargs instead of the temp dir / the saved working directory.
    """
    original_cwd = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            # Restore even if the body raised.
            os.chdir(original_cwd)
@contextmanager
def lowerCamelCase__ ( ) -> List[str]:
    """Context manager asserting Arrow-allocated memory grows inside the block.

    NOTE(review): the baseline binds the mangled name ``_UpperCamelCase`` but
    the assert reads ``previous_allocated_memory`` — restore before use.
    """
    import gc

    gc.collect()
    _UpperCamelCase = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def lowerCamelCase__ ( ) -> int:
    """Context manager asserting Arrow-allocated memory does not grow inside
    the block (shadows the context manager above; same mangled-name caveat)."""
    import gc

    gc.collect()
    _UpperCamelCase = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowerCamelCase__(expected_rng, actual_rng):
    """Return True when two RNGs are in the same state, judged by comparing the
    next 10 integers in [0, 100) drawn from deep copies of each.

    Deep-copying means neither argument's state is advanced by the check.

    Fixes: the original declared the same mangled parameter name twice, which
    is a SyntaxError.
    """
    return deepcopy(expected_rng).integers(0, 100, 10).tolist() == deepcopy(actual_rng).integers(0, 100, 10).tolist()
def lowerCamelCase__ ( __snake_case ) -> Dict:
    """Decorator turning HTTP 500/502 failures into pytest xfails instead of
    hard errors.

    NOTE(review): requires the third-party `decorator` package (imported
    lazily); the inner ``_wrapper`` parameters share the mangled name
    ``__snake_case`` (duplicate parameter names are a SyntaxError) and the
    body reads ``func`` — presumably ``func, *args, **kwargs``.
    """
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(__snake_case, *__snake_case, **__snake_case ):
        try:
            return func(*__snake_case, **__snake_case )
        except HTTPError as err:
            if str(__snake_case ).startswith('''500''' ) or str(__snake_case ).startswith('''502''' ):
                pytest.xfail(str(__snake_case ) )
            raise err

    return decorator.decorator(_wrapper, __snake_case )
class _UpperCAmelCase:
    """Result of a subprocess run: return code plus captured stdout/stderr lines.

    Fixes: the original ``__init__`` declared the same mangled parameter name
    three times (a SyntaxError) and assigned to a local instead of ``self``
    attributes — the caller below reads ``result.returncode``.
    """

    def __init__(self, returncode, stdout, stderr):
        # returncode: process exit status (int)
        self.returncode = returncode
        # stdout / stderr: captured output lines
        self.stdout = stdout
        self.stderr = stderr
async def lowerCamelCase__(stream, callback):
    """Read *stream* line by line, forwarding each non-empty line to *callback*
    until EOF (an empty read) is reached.

    Fixes: the original declared the same mangled parameter name twice, which
    is a SyntaxError.
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False ) -> _RunOutput:
    """Run *cmd* as an async subprocess, teeing stdout/stderr line-by-line.

    Each decoded line is captured into a list and, unless *quiet*, echoed to
    this process' own stdout/stderr with a `stdout:`/`stderr:` label. Returns
    a `_RunOutput` with the exit code and the captured lines.

    NOTE(review): renamed from the mangled `lowerCamelCase__` — the sync
    wrapper below already calls `_stream_subprocess(...)`.
    """
    # Bug fix: the original signature declared six parameters with one shared
    # name (SyntaxError); names restored from how the body uses them.
    if echo:
        print('\nRunning: ', ' '.join(cmd ) )

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label="" ):
        # Decode once, strip the trailing newline, record, and optionally echo.
        line = line.decode('utf-8' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label, line, file=pipe )

    # Bug fix: wrap the readers in tasks — passing bare coroutines to
    # `asyncio.wait` was removed in Python 3.11.
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label='stdout:' ) ) ),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label='stderr:' ) ) ),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err )
def lowerCamelCase__ ( cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True ) -> _RunOutput:
    """Synchronous wrapper around `_stream_subprocess`.

    Runs *cmd*, raises RuntimeError (with the combined worker stderr) on a
    non-zero exit code, raises when the subprocess produced no output at all,
    and otherwise returns the `_RunOutput`.
    """
    # Bug fix: the original signature declared six parameters with one shared
    # name (SyntaxError); names restored from how the body uses them.
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo ) )

    cmd_str = ' '.join(cmd )
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr )
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}" )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output." )

    return result
def pytest_xdist_worker_id():
    """Return this pytest-xdist worker's numeric id ('gw3' -> 3).

    Returns 0 when the suite is not running under xdist (or on worker 'gw0').

    NOTE(review): renamed from the mangled `lowerCamelCase__` — the
    unique-port helper below already calls `pytest_xdist_worker_id()`.
    """
    # Bug fix: the original bound the env value to a throwaway name and then
    # passed an undefined name to re.sub, raising NameError.
    worker = os.environ.get('PYTEST_XDIST_WORKER', 'gw0' )
    worker = re.sub(r'^gw', '', worker, 0, re.M )
    return int(worker )
def lowerCamelCase__ ( ):
    """Return a torch.distributed master port unique to this xdist worker.

    Base port 29500 plus the worker id, so parallel workers don't collide.
    """
    port = 29500
    # Bug fix: both values were bound to one throwaway name while the return
    # statement read `port` / `uniq_delta`, raising NameError at runtime.
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
| 19 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    # Bug fix: the setting must be written to the environment — binding the
    # string to a module-level variable (as the mangled original did) has no
    # effect on JAX's allocator.
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    """Builds a tiny random Pegasus config plus matching inputs, and provides
    the cache-decoding consistency checks shared by the Flax Pegasus tests.

    NOTE(review): class, attribute, and method names are restored from their
    call sites in this file (`FlaxPegasusModelTester(self)`, `self.config_cls`,
    `self.config_updates`, `check_use_cache_forward*`, ...); the mangled
    original bound everything to placeholder names, which did not resolve.
    """

    config_cls = PegasusConfig
    config_updates = {}  # extra kwargs merged into the config (none by default)
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        # Bug fix: the original named every parameter `__a` (a SyntaxError) and
        # bound all values to one throwaway local instead of self attributes.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a small PegasusConfig and a matching random inputs dict."""
        # Clip ids to >= 3 so special ids never occur mid-sequence, then force
        # an EOS token at the end of every encoder sequence.
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Incremental decoding with init_cache must match one-shot decoding."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        # Compare only the last position's first 5 logits: enough to detect a
        # cache mismatch without being sensitive to tiny numeric noise.
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as `check_use_cache_forward`, but with an explicit decoder
        attention mask zero-padded out to the full cache length."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # Pad the provided mask with zeros out to the full cache length.
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    """Build the standard Pegasus inputs dict.

    When the attention masks are not supplied they are derived from
    ``config.pad_token_id`` (1 for real tokens, 0 for padding).

    NOTE(review): renamed from the mangled `lowerCamelCase__` — the model
    tester above already calls `prepare_pegasus_inputs_dict(...)`. The
    original also declared all five parameters with one shared name
    (SyntaxError) and used a non-existent `np.inta` dtype (upstream: int8).
    """
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        # The first decoder position (the decoder start token) is always
        # attended, even when its id equals the pad token.
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class _UpperCAmelCase( lowerCamelCase , unittest.TestCase ):
    """Test suite for the Flax Pegasus models: config sanity checks, cached
    decoding, JIT-vs-eager encode/decode shape equality, and slow
    integration/summarization checks.

    NOTE(review): this file's identifiers are mangled — the mixin base
    `lowerCamelCase`, the argument placeholder `__a`, and the throwaway
    assignment target `_UpperCamelCase` do not resolve at runtime; the
    original names need restoring before these tests can actually run.
    """

    # NOTE(review): the repeated `lowercase__` assignments below shadow each
    # other; upstream these are distinct class flags (all_model_classes,
    # all_generative_model_classes, is_encoder_decoder, test_pruning, ...).
    lowercase__ = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    lowercase__ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    lowercase__ = True
    lowercase__ = False
    lowercase__ = False
    lowercase__ = False

    def UpperCAmelCase ( self) -> Any:
        """Build the shared model tester and config tester for each test."""
        # NOTE(review): both results are bound to a throwaway local instead of
        # `self.model_tester` / `self.config_tester`, and `__a` (presumably the
        # PegasusConfig class) is undefined here — confirm against upstream.
        _UpperCamelCase = FlaxPegasusModelTester(self)
        _UpperCamelCase = ConfigTester(self , config_class=__a)

    def UpperCAmelCase ( self) -> Union[str, Any]:
        """Run the common PretrainedConfig sanity checks."""
        self.config_tester.run_common_tests()

    def UpperCAmelCase ( self) -> Tuple:
        """Cached incremental decoding must match one-shot decoding."""
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(__a , __a , __a)

    def UpperCAmelCase ( self) -> Optional[Any]:
        """Same cache check, with an explicit decoder attention mask."""
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(__a , __a , __a)

    def UpperCAmelCase ( self) -> Optional[int]:
        """`encode` must produce identically-shaped outputs with and without JIT."""
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                _UpperCamelCase = self._prepare_for_class(__a , __a)
                _UpperCamelCase = model_class(__a)

                # NOTE(review): the duplicate parameter names below are a
                # SyntaxError; upstream the signature is
                # `def encode_jitted(input_ids, attention_mask=None, **kwargs)`.
                @jax.jit
                def encode_jitted(__a , __a=None , **__a):
                    return model.encode(input_ids=__a , attention_mask=__a)

                with self.subTest('''JIT Enabled'''):
                    _UpperCamelCase = encode_jitted(**__a).to_tuple()

                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        _UpperCamelCase = encode_jitted(**__a).to_tuple()

                self.assertEqual(len(__a) , len(__a))
                for jitted_output, output in zip(__a , __a):
                    self.assertEqual(jitted_output.shape , output.shape)

    def UpperCAmelCase ( self) -> str:
        """`decode` must produce identically-shaped outputs with and without JIT."""
        _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                _UpperCamelCase = model_class(__a)
                _UpperCamelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''])
                _UpperCamelCase = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }

                # NOTE(review): duplicate parameter names — SyntaxError; upstream:
                # `def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs)`.
                @jax.jit
                def decode_jitted(__a , __a , __a):
                    return model.decode(
                        decoder_input_ids=__a , decoder_attention_mask=__a , encoder_outputs=__a , )

                with self.subTest('''JIT Enabled'''):
                    _UpperCamelCase = decode_jitted(**__a).to_tuple()

                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        _UpperCamelCase = decode_jitted(**__a).to_tuple()

                self.assertEqual(len(__a) , len(__a))
                for jitted_output, output in zip(__a , __a):
                    self.assertEqual(jitted_output.shape , output.shape)

    @slow
    def UpperCAmelCase ( self) -> int:
        """Smoke test: each model class loads `google/pegasus-large` from
        PyTorch weights and runs a minimal (1x1) forward pass."""
        for model_class_name in self.all_model_classes:
            _UpperCamelCase = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=__a)
            _UpperCamelCase = np.ones((1, 1))
            _UpperCamelCase = model(__a)
            self.assertIsNotNone(__a)

    @slow
    def UpperCAmelCase ( self) -> Optional[int]:
        """Integration test: pegasus-xsum summaries of two articles must match
        the expected reference summaries exactly."""
        _UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''')
        _UpperCamelCase = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''')
        _UpperCamelCase = [
            ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
            ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
        ]
        _UpperCamelCase = [
            '''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
            '''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
        ]
        _UpperCamelCase = tokenizer(__a , return_tensors='''np''' , truncation=__a , max_length=5_12 , padding=__a)
        _UpperCamelCase = model.generate(**__a , num_beams=2).sequences
        _UpperCamelCase = tokenizer.batch_decode(__a , skip_special_tokens=__a)
        assert tgt_text == decoded
| 19 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCAmelCase__ = '''\
'''
lowerCAmelCase__ = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
lowerCAmelCase__ = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __snake_case ( datasets.Metric):
    """Perplexity metric: the exponentiated average negative log-likelihood of
    each input text under a causal language model.

    NOTE(review): local names in `_compute` were collapsed to the placeholder
    `_lowerCamelCase`, so later references (`device`, `model`, `tokenizer`,
    `encodings`, `encoded_texts`, `attn_masks`, `ppls`, `loss_fct`, ...) do
    not resolve, and the duplicate `__lowerCAmelCase` parameter names in the
    second method's signature are a SyntaxError. Restore from the upstream
    `datasets` perplexity metric before use.
    """

    def SCREAMING_SNAKE_CASE ( self : Dict ):
        """Declare the metric's input features and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
            {
                '''input_texts''': datasets.Value('''string''' ),
            } ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )

    def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : int = 1_6 , __lowerCAmelCase : bool = True , __lowerCAmelCase : Dict=None ):
        """Compute per-text perplexities and their mean.

        Upstream parameters are (input_texts, model_id, batch_size=16,
        add_start_token=True, device=None); the mangled signature above is a
        SyntaxError (duplicate parameter names).
        """
        # Resolve the torch device; "gpu" is accepted as an alias for "cuda".
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                _lowerCamelCase : List[str] = '''cuda'''
        else:
            _lowerCamelCase : Union[str, Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''

        _lowerCamelCase : Tuple = AutoModelForCausalLM.from_pretrained(__lowerCAmelCase )
        _lowerCamelCase : Optional[int] = model.to(__lowerCAmelCase )

        _lowerCamelCase : Tuple = AutoTokenizer.from_pretrained(__lowerCAmelCase )

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            _lowerCamelCase : List[Any] = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(__lowerCAmelCase ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            _lowerCamelCase : Optional[Any] = model.config.max_length - 1
        else:
            _lowerCamelCase : Any = model.config.max_length

        # Tokenize all texts at once (padded/truncated to the max length).
        _lowerCamelCase : Optional[Any] = tokenizer(
            __lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , return_tensors='''pt''' , return_attention_mask=__lowerCAmelCase , ).to(__lowerCAmelCase )

        _lowerCamelCase : Tuple = encodings['''input_ids''']
        _lowerCamelCase : Union[str, Any] = encodings['''attention_mask''']

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        _lowerCamelCase : Union[str, Any] = []
        _lowerCamelCase : str = CrossEntropyLoss(reduction='''none''' )

        for start_index in logging.tqdm(range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase ) ):
            _lowerCamelCase : List[Any] = min(start_index + batch_size , len(__lowerCAmelCase ) )
            _lowerCamelCase : str = encoded_texts[start_index:end_index]
            _lowerCamelCase : int = attn_masks[start_index:end_index]

            if add_start_token:
                # Prepend a BOS token (and a matching 1 in the attention mask)
                # so the first real token's probability is included.
                _lowerCamelCase : Optional[int] = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__lowerCAmelCase )
                _lowerCamelCase : List[Any] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
                # NOTE(review): `torch.intaa` is not a torch dtype — mangled
                # from `torch.int64`; confirm against upstream.
                _lowerCamelCase : int = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__lowerCAmelCase ), attn_mask] , dim=1 )

            _lowerCamelCase : Tuple = encoded_batch

            with torch.no_grad():
                _lowerCamelCase : Optional[int] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase ).logits

            # Shift so position i's logits predict token i+1; mask out padding.
            _lowerCamelCase : Dict = out_logits[..., :-1, :].contiguous()
            _lowerCamelCase : List[Any] = labels[..., 1:].contiguous()
            _lowerCamelCase : Union[str, Any] = attn_mask[..., 1:].contiguous()

            # NOTE(review): `torch.expa` is not a torch API — presumably
            # mangled from `torch.exp2`/`torch.exp`; confirm against upstream.
            _lowerCamelCase : str = torch.expa(
                (loss_fct(shift_logits.transpose(1 , 2 ) , __lowerCAmelCase ) * shift_attention_mask_batch).sum(1 )
                / shift_attention_mask_batch.sum(1 ) )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(__lowerCAmelCase )}
| 598 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. NOTE(review): the second assignment below rebinds the same
# mangled name, so the logger handle is immediately lost — upstream these are
# presumably two distinct names (a logger and a pretrained-config archive map);
# confirm before relying on either.
lowerCAmelCase__ = logging.get_logger(__name__)
# Map of canonical checkpoint name -> hosted config URL.
lowerCAmelCase__ = {
    '''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class __snake_case ( _lowercase):
    """Configuration class for the CANINE character-level transformer.

    Defaults reproduce the `google/canine-s` architecture. Token-id defaults
    live in Unicode's private-use area (0xE000/0xE001) because CANINE operates
    directly on characters.
    """

    # NOTE(review): mangled attribute name kept to avoid touching unseen
    # consumers; in transformers configs this attribute is `model_type`.
    snake_case__ = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        # Bug fix: the original named every parameter `__lowerCAmelCase`
        # (a SyntaxError) and bound every value to one throwaway local instead
        # of instance attributes; targets restored from the parameter values.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
| 598 | 1 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
# Typing markers for "an instance of a dataclass" / "a dataclass type",
# used by the argument parser's annotations. Bug fix: both NewTypes were
# bound to one shadowing mangled name while the parser's annotations read
# `DataClassType`, raising NameError; names restored from the NewType labels.
DataClass = NewType('DataClass', Any)
DataClassType = NewType('DataClassType', Any)
def UpperCAmelCase_ ( v ) -> bool:
    """Parse a truthy/falsy command-line string into a bool.

    Real bools pass through unchanged; strings accept the usual yes/no
    spellings (case-insensitive). Raises ArgumentTypeError for anything else.
    """
    # Bug fix: the original tested `isinstance(v, v)` (TypeError at runtime;
    # mangled from `isinstance(v, bool)`) and the error f-string below read an
    # undefined name — the parameter's real name `v` is restored from it.
    if isinstance(v, bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." )
def UpperCAmelCase_ ( choices ) -> Callable[[str], Any]:
    """Return a parser mapping an argv string back to the matching choice.

    The mapping keys on each choice's ``str()`` form; unknown strings are
    returned unchanged so argparse can reject them against ``choices`` itself.
    """
    # Bug fix: the comprehension keyed on the whole argument and the body read
    # the undefined names `choices` / `str_to_choice`; both restored.
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg )
def UpperCAmelCase_ (
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    """``dataclasses.field()`` helper that stores argparse extras.

    Records *aliases* and *help* in the field's metadata dict (creating it if
    needed) and forwards everything else to ``dataclasses.field``.

    Bug fix: the original declared all keyword-only parameters with one shared
    name (SyntaxError) and rebound the metadata dict instead of writing the
    "aliases"/"help" keys into it; names restored from how the body uses them.
    """
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = 42
def __init__( self : Any , _lowerCAmelCase : Union[DataClassType, Iterable[DataClassType]] , **_lowerCAmelCase : Dict ):
# To make the default appear when using --help
if "formatter_class" not in kwargs:
SCREAMING_SNAKE_CASE_ = ArgumentDefaultsHelpFormatter
super().__init__(**_lowerCAmelCase )
if dataclasses.is_dataclass(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = [dataclass_types]
SCREAMING_SNAKE_CASE_ = list(_lowerCAmelCase )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(_lowerCAmelCase )
@staticmethod
def lowerCAmelCase_ ( _lowerCAmelCase : ArgumentParser , _lowerCAmelCase : dataclasses.Field ):
SCREAMING_SNAKE_CASE_ = F"--{field.name}"
SCREAMING_SNAKE_CASE_ = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , _lowerCAmelCase ):
raise RuntimeError(
'Unresolved type detected, which should have been done with the help of '
'`typing.get_type_hints` method by default' )
SCREAMING_SNAKE_CASE_ = kwargs.pop('aliases' , [] )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = [aliases]
SCREAMING_SNAKE_CASE_ = getattr(field.type , '__origin__' , field.type )
if origin_type is Union or (hasattr(_lowerCAmelCase , 'UnionType' ) and isinstance(_lowerCAmelCase , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(_lowerCAmelCase ) not in field.type.__args__
):
raise ValueError(
'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
' the argument parser only supports one type per argument.'
F" Problem encountered in field '{field.name}'." )
if type(_lowerCAmelCase ) not in field.type.__args__:
# filter `str` in Union
SCREAMING_SNAKE_CASE_ = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
SCREAMING_SNAKE_CASE_ = getattr(field.type , '__origin__' , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
SCREAMING_SNAKE_CASE_ = (
field.type.__args__[0] if isinstance(_lowerCAmelCase , field.type.__args__[1] ) else field.type.__args__[1]
)
SCREAMING_SNAKE_CASE_ = getattr(field.type , '__origin__' , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
SCREAMING_SNAKE_CASE_ = {}
if origin_type is Literal or (isinstance(field.type , _lowerCAmelCase ) and issubclass(field.type , _lowerCAmelCase )):
if origin_type is Literal:
SCREAMING_SNAKE_CASE_ = field.type.__args__
else:
SCREAMING_SNAKE_CASE_ = [x.value for x in field.type]
SCREAMING_SNAKE_CASE_ = make_choice_type_function(kwargs['choices'] )
if field.default is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE_ = field.default
else:
SCREAMING_SNAKE_CASE_ = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
SCREAMING_SNAKE_CASE_ = copy(_lowerCAmelCase )
# Hack because type=bool in argparse does not behave as we want.
SCREAMING_SNAKE_CASE_ = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
SCREAMING_SNAKE_CASE_ = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
SCREAMING_SNAKE_CASE_ = default
# This tells argparse we accept 0 or 1 value after --field_name
SCREAMING_SNAKE_CASE_ = '?'
# This is the value that will get picked if we do --field_name (without value)
SCREAMING_SNAKE_CASE_ = True
elif isclass(_lowerCAmelCase ) and issubclass(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = field.type.__args__[0]
SCREAMING_SNAKE_CASE_ = '+'
if field.default_factory is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE_ = field.default_factory()
elif field.default is dataclasses.MISSING:
SCREAMING_SNAKE_CASE_ = True
else:
SCREAMING_SNAKE_CASE_ = field.type
if field.default is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE_ = field.default
elif field.default_factory is not dataclasses.MISSING:
SCREAMING_SNAKE_CASE_ = field.default_factory()
else:
SCREAMING_SNAKE_CASE_ = True
parser.add_argument(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
SCREAMING_SNAKE_CASE_ = False
parser.add_argument(F"--no_{field.name}" , action='store_false' , dest=field.name , **_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : DataClassType ):
if hasattr(_lowerCAmelCase , '_argument_group_name' ):
SCREAMING_SNAKE_CASE_ = self.add_argument_group(dtype._argument_group_name )
else:
SCREAMING_SNAKE_CASE_ = self
try:
SCREAMING_SNAKE_CASE_ = get_type_hints(_lowerCAmelCase )
except NameError:
raise RuntimeError(
F"Type resolution failed for {dtype}. Try declaring the class in global scope or "
'removing line of `from __future__ import annotations` which opts in Postponed '
'Evaluation of Annotations (PEP 563)' )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = '.'.join(map(_lowerCAmelCase , sys.version_info[:3] ) )
raise RuntimeError(
F"Type resolution failed for {dtype} on Python {python_version}. Try removing "
'line of `from __future__ import annotations` which opts in union types as '
'`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
'support Python versions that lower than 3.10, you need to use '
'`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
'`X | None`.' ) from ex
raise
for field in dataclasses.fields(_lowerCAmelCase ):
if not field.init:
continue
SCREAMING_SNAKE_CASE_ = type_hints[field.name]
self._parse_dataclass_field(_lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase_(self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None):
    """Parse command-line strings into instances of the registered dataclass types.

    Args:
        args: explicit list of argument strings (defaults to ``sys.argv[1:]``).
        return_remaining_strings: if True, also return the unparsed strings.
        look_for_args_file: if True, read defaults from a ``<script>.args`` file.
        args_filename: explicit args-file path; takes precedence over the lookup.
        args_file_flag: CLI flag whose values name additional args files.

    Returns:
        A tuple with one instance per dataclass type, optionally followed by a
        namespace of extra attributes and/or the remaining argument strings.

    Raises:
        ValueError: when strings remain unparsed and ``return_remaining_strings``
            is False.
    """
    if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
        args_files = []
        if args_filename:
            args_files.append(Path(args_filename))
        elif look_for_args_file and len(sys.argv):
            args_files.append(Path(sys.argv[0]).with_suffix(".args"))
        # args files specified via command line flag should overwrite default args files so we add them last
        if args_file_flag:
            # Create special parser just to extract the args_file_flag values
            args_file_parser = ArgumentParser()
            args_file_parser.add_argument(args_file_flag, type=str, action="append")
            # Use only remaining args for further parsing (remove the args_file_flag)
            cfg, args = args_file_parser.parse_known_args(args=args)
            cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)
            if cmd_args_file_paths:
                args_files.extend([Path(p) for p in cmd_args_file_paths])
        file_args = []
        for args_file in args_files:
            if args_file.exists():
                file_args += args_file.read_text().split()
        # in case of duplicate arguments the last one has precedence
        # args specified via the command line should overwrite args from files, so we add them last
        args = file_args + args if args is not None else file_args + sys.argv[1:]
    namespace, remaining_args = self.parse_known_args(args=args)
    outputs = []
    for dtype in self.dataclass_types:
        keys = {f.name for f in dataclasses.fields(dtype) if f.init}
        inputs = {k: v for k, v in vars(namespace).items() if k in keys}
        # Consume the keys so leftover attributes can be detected below.
        for k in keys:
            delattr(namespace, k)
        obj = dtype(**inputs)
        outputs.append(obj)
    if len(namespace.__dict__) > 0:
        # additional namespace.
        outputs.append(namespace)
    if return_remaining_strings:
        return (*outputs, remaining_args)
    else:
        if remaining_args:
            raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
        return (*outputs,)
def lowerCAmelCase_(self, args: Dict[str, Any], allow_extra_keys: bool = False):
    """Instantiate the registered dataclass types from a plain dict.

    Args:
        args: mapping of field name to value, shared across all dataclass types.
        allow_extra_keys: when False, keys consumed by no dataclass raise.

    Returns:
        A tuple with one dataclass instance per registered type.

    Raises:
        ValueError: when ``allow_extra_keys`` is False and some keys are unused.
    """
    unused_keys = set(args.keys())
    outputs = []
    for dtype in self.dataclass_types:
        keys = {f.name for f in dataclasses.fields(dtype) if f.init}
        inputs = {k: v for k, v in args.items() if k in keys}
        unused_keys.difference_update(inputs.keys())
        obj = dtype(**inputs)
        outputs.append(obj)
    if not allow_extra_keys and unused_keys:
        raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
    return tuple(outputs)

# Bind the name the JSON/YAML entry points call via `self.parse_dict`.
parse_dict = lowerCAmelCase_
def lowerCAmelCase_(self, json_file: str, allow_extra_keys: bool = False):
    """Instantiate the registered dataclass types from a JSON file.

    Args:
        json_file: path of the JSON file to load (UTF-8).
        allow_extra_keys: forwarded to ``parse_dict``; when False, unknown keys raise.

    Returns:
        A tuple with one dataclass instance per registered type.
    """
    with open(Path(json_file), encoding="utf-8") as open_json_file:
        data = json.loads(open_json_file.read())
    outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
    return tuple(outputs)
def lowerCAmelCase_(self, yaml_file: str, allow_extra_keys: bool = False):
    """Instantiate the registered dataclass types from a YAML file.

    Args:
        yaml_file: path of the YAML file to load.
        allow_extra_keys: forwarded to ``parse_dict``; when False, unknown keys raise.

    Returns:
        A tuple with one dataclass instance per registered type.
    """
    outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
    return tuple(outputs)
'''simple docstring'''

# Treat characters as digits of a base-256 number when rolling the hash.
alphabet_size = 2_5_6
# Keep the original (mangled) binding as well for backward compatibility.
lowerCAmelCase = alphabet_size
# Modulus to hash a string
# A large prime keeps rolling-hash values bounded and collisions unlikely.
modulus = 1_0_0_0_0_0_3
lowerCAmelCase = modulus
def _A(pattern, text, alphabet_size=2_5_6, modulus=1_0_0_0_0_0_3) -> bool:
    """Return True when `pattern` occurs as a substring of `text` (Rabin-Karp).

    The original signature declared two parameters under one mangled name (a
    SyntaxError) and all locals collided; the module constants it relied on are
    exposed here as backward-compatible keyword parameters with the same values.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        # Compare hashes first; confirm with a direct slice check on a match.
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


# Name used by the module's self-test and external callers.
rabin_karp = _A
def _A() -> None:
    """Self-test for the Rabin-Karp matcher; raises AssertionError on any failure.

    The original bound every pattern/text to one mangled local and passed an
    undefined name `A` to the matcher.
    """
    # Test 1)
    pattern = "abc1abc12"
    text_with_match = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_without_match = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_with_match) and not rabin_karp(pattern, text_without_match)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5) non-ASCII characters roll through the hash as well
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


# Name referenced by the module's `__main__` guard.
test_rabin_karp = _A
if __name__ == "__main__":
    # NOTE(review): confirm that `test_rabin_karp` is bound at module scope — in this
    # chunk the self-test above appears under a mangled name.
    test_rabin_karp()
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class lowerCAmelCase_ ( __snake_case ):
    # Shared test mixin for sequence feature extractors: attribute sanity checks,
    # BatchFeature wrapping/tensor conversion, padding, truncation and attention masks.
    # NOTE(review): every test method below was mangled to the single name `__a`, so at
    # class-creation time later definitions overwrite earlier ones, and many call
    # arguments were mangled to `_lowerCAmelCase` — confirm against the upstream file.

    # to overwrite at feature extractactor specific tests
    _UpperCamelCase : Union[str, Any] = None
    _UpperCamelCase : str = None

    @property
    def __a ( self ):
        # kwargs used to instantiate the feature extractor under test
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def __a ( self ):
        # the extractor must expose the standard sequence-feature attributes
        _lowercase : Dict = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(_lowerCAmelCase , 'feature_size' ) )
        self.assertTrue(hasattr(_lowerCAmelCase , 'sampling_rate' ) )
        self.assertTrue(hasattr(_lowerCAmelCase , 'padding_value' ) )

    def __a ( self ):
        # wrapping inputs in a BatchFeature keeps lengths; numpy conversion must yield
        # a (batch, seq, feature_size) shaped array
        _lowercase : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
        _lowercase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
        _lowercase : Union[str, Any] = feat_extract.model_input_names[0]
        _lowercase : int = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(_lowerCAmelCase ) == len(_lowerCAmelCase ) for x, y in zip(_lowerCAmelCase , processed_features[input_name] ) ) )
        _lowercase : Tuple = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
        _lowercase : Union[str, Any] = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
        _lowercase : Optional[Any] = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            # add a trailing feature axis when the extractor produces 2-D output
            _lowercase : Any = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )

    @require_torch
    def __a ( self ):
        # same shape check after conversion to PyTorch tensors
        _lowercase : Tuple = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
        _lowercase : Any = self.feature_extraction_class(**self.feat_extract_dict )
        _lowercase : List[Any] = feat_extract.model_input_names[0]
        _lowercase : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
        _lowercase : List[str] = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            _lowercase : Union[str, Any] = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )

    @require_tf
    def __a ( self ):
        # same shape check after conversion to TensorFlow tensors
        _lowercase : Tuple = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCAmelCase )
        _lowercase : str = self.feature_extraction_class(**self.feat_extract_dict )
        _lowercase : Optional[int] = feat_extract.model_input_names[0]
        _lowercase : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
        _lowercase : int = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            _lowercase : List[Any] = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )

    def __a ( self , _lowerCAmelCase=False ):
        # shared padding checks for list and numpy inputs (`numpify` selects input kind)
        def _inputs_have_equal_length(_lowerCAmelCase ):
            # True when every sequence in the batch has the same length as the first
            _lowercase : str = len(input[0] )
            for input_slice in input[1:]:
                if len(_lowerCAmelCase ) != length:
                    return False
            return True

        def _inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ):
            # element-wise comparison of two batches with a small numeric tolerance
            if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
                return False
            for input_slice_a, input_slice_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
                if not np.allclose(np.asarray(_lowerCAmelCase ) , np.asarray(_lowerCAmelCase ) , atol=1E-3 ):
                    return False
            return True

        _lowercase : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
        _lowercase : Any = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCAmelCase )
        _lowercase : Any = feat_extract.model_input_names[0]
        _lowercase : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
        _lowercase : str = self.feat_extract_tester.seq_length_diff
        _lowercase : Optional[int] = self.feat_extract_tester.max_seq_length + pad_diff
        _lowercase : int = self.feat_extract_tester.min_seq_length
        _lowercase : Optional[int] = self.feat_extract_tester.batch_size
        _lowercase : Optional[int] = self.feat_extract_tester.feature_size
        # test padding for List[int] + numpy
        _lowercase : int = feat_extract.pad(_lowerCAmelCase , padding=_lowerCAmelCase )
        _lowercase : int = input_a[input_name]
        _lowercase : Union[str, Any] = feat_extract.pad(_lowerCAmelCase , padding='longest' )
        _lowercase : Dict = input_a[input_name]
        _lowercase : Any = feat_extract.pad(_lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1] ) )
        _lowercase : Optional[int] = input_a[input_name]
        _lowercase : int = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='np' )
        _lowercase : Tuple = input_a[input_name]
        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(_lowerCAmelCase ):
            feat_extract.pad(_lowerCAmelCase , padding='max_length' )[input_name]
        _lowercase : List[str] = feat_extract.pad(
            _lowerCAmelCase , padding='max_length' , max_length=_lowerCAmelCase , return_tensors='np' )
        _lowercase : int = input_a[input_name]
        self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
        self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
        self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
        self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
        self.assertTrue(len(input_a[0] ) == pad_min_length )
        self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
        self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
        self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
        if feature_size > 1:
            self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
        # test padding for `pad_to_multiple_of` for List[int] + numpy
        _lowercase : Any = feat_extract.pad(_lowerCAmelCase , pad_to_multiple_of=1_0 )
        _lowercase : List[str] = input_a[input_name]
        _lowercase : int = feat_extract.pad(_lowerCAmelCase , padding='longest' , pad_to_multiple_of=1_0 )
        _lowercase : Optional[Any] = input_a[input_name]
        _lowercase : int = feat_extract.pad(
            _lowerCAmelCase , padding='max_length' , pad_to_multiple_of=1_0 , max_length=_lowerCAmelCase )
        _lowercase : int = input_a[input_name]
        _lowercase : List[str] = feat_extract.pad(
            _lowerCAmelCase , padding='max_length' , pad_to_multiple_of=1_0 , max_length=_lowerCAmelCase , return_tensors='np' , )
        _lowercase : Optional[int] = input_a[input_name]
        self.assertTrue(all(len(_lowerCAmelCase ) % 1_0 == 0 for x in input_a ) )
        self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
        _lowercase : str = pad_max_length if pad_max_length % 1_0 == 0 else (pad_max_length // 1_0 + 1) * 1_0
        self.assertTrue(all(len(_lowerCAmelCase ) == expected_mult_pad_length for x in input_a ) )
        self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
        if feature_size > 1:
            self.assertTrue(input_a.shape[2] == feature_size )
        # Check padding value is correct
        _lowercase : int = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
            < 1E-3 )
        self.assertTrue(
            abs(
                np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
            < 1E-3 )
        self.assertTrue(
            abs(
                np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
            < 1E-3 )
        self.assertTrue(
            abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
        self.assertTrue(
            abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
            < 1E-3 )

    def __a ( self , _lowerCAmelCase=False ):
        # shared truncation checks for list and numpy inputs
        def _inputs_have_equal_length(_lowerCAmelCase ):
            _lowercase : Optional[Any] = len(input[0] )
            for input_slice in input[1:]:
                if len(_lowerCAmelCase ) != length:
                    return False
            return True

        def _inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ):
            if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
                return False
            for input_slice_a, input_slice_a in zip(_lowerCAmelCase , _lowerCAmelCase ):
                if not np.allclose(np.asarray(_lowerCAmelCase ) , np.asarray(_lowerCAmelCase ) , atol=1E-3 ):
                    return False
            return True

        _lowercase : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
        _lowercase : Any = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCAmelCase )
        _lowercase : List[str] = feat_extract.model_input_names[0]
        _lowercase : Any = BatchFeature({input_name: speech_inputs} )
        # truncate to smallest
        _lowercase : str = feat_extract.pad(
            _lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=_lowerCAmelCase )
        _lowercase : Optional[Any] = input_a[input_name]
        _lowercase : Optional[Any] = feat_extract.pad(_lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) )
        _lowercase : Tuple = input_a[input_name]
        self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
        self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
        # truncate to smallest with np
        _lowercase : List[str] = feat_extract.pad(
            _lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=_lowerCAmelCase , )
        _lowercase : Dict = input_a[input_name]
        _lowercase : Any = feat_extract.pad(
            _lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' )
        _lowercase : Tuple = input_a[input_name]
        self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
        self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
        # truncate to middle
        _lowercase : Union[str, Any] = feat_extract.pad(
            _lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=_lowerCAmelCase , return_tensors='np' , )
        _lowercase : Union[str, Any] = input_a[input_name]
        _lowercase : Any = feat_extract.pad(
            _lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=_lowerCAmelCase )
        _lowercase : Optional[int] = input_a[input_name]
        _lowercase : Dict = feat_extract.pad(
            _lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' )
        _lowercase : Tuple = input_a[input_name]
        self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
        self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
        self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
        self.assertTrue(_inputs_are_equal(_lowerCAmelCase , _lowerCAmelCase ) )
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )
        self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(_lowerCAmelCase ):
            feat_extract.pad(_lowerCAmelCase , truncation=_lowerCAmelCase )[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(_lowerCAmelCase ):
            feat_extract.pad(_lowerCAmelCase , padding='longest' , truncation=_lowerCAmelCase )[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(_lowerCAmelCase ):
            feat_extract.pad(_lowerCAmelCase , padding='longest' , truncation=_lowerCAmelCase )[input_name]
        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(_lowerCAmelCase ):
            feat_extract.pad(_lowerCAmelCase , padding='max_length' , truncation=_lowerCAmelCase )[input_name]
        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        _lowercase : int = 1_2
        _lowercase : List[Any] = feat_extract.pad(
            _lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCAmelCase , truncation=_lowerCAmelCase , )
        _lowercase : Optional[int] = input_a[input_name]
        _lowercase : Tuple = feat_extract.pad(
            _lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCAmelCase , )
        _lowercase : Dict = input_a[input_name]
        # retrieve expected_length as multiple of pad_to_multiple_of
        _lowercase : Tuple = len(speech_inputs[0] )
        if expected_length % pad_to_multiple_of != 0:
            _lowercase : int = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
        self.assertTrue(len(input_a[0] ) == expected_length )
        self.assertTrue(_inputs_have_equal_length(_lowerCAmelCase ) )
        self.assertFalse(_inputs_have_equal_length(_lowerCAmelCase ) )

    def __a ( self ):
        # padding checks on python-list inputs
        self._check_padding(numpify=_lowerCAmelCase )

    def __a ( self ):
        # padding checks on numpy inputs
        self._check_padding(numpify=_lowerCAmelCase )

    def __a ( self ):
        # truncation checks on python-list inputs
        self._check_truncation(numpify=_lowerCAmelCase )

    def __a ( self ):
        # truncation checks on numpy inputs
        self._check_truncation(numpify=_lowerCAmelCase )

    @require_torch
    def __a ( self ):
        # numpy and torch padding must agree numerically
        _lowercase : Dict = self.feature_extraction_class(**self.feat_extract_dict )
        _lowercase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_common()
        _lowercase : Dict = feat_extract.model_input_names[0]
        _lowercase : Optional[int] = BatchFeature({input_name: speech_inputs} )
        _lowercase : str = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='np' )[input_name]
        _lowercase : Tuple = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' )[input_name]
        self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )

    @require_tf
    def __a ( self ):
        # numpy and tensorflow padding must agree numerically
        _lowercase : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
        _lowercase : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
        _lowercase : str = feat_extract.model_input_names[0]
        _lowercase : Tuple = BatchFeature({input_name: speech_inputs} )
        _lowercase : Optional[Any] = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='np' )[input_name]
        _lowercase : Tuple = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='tf' )[input_name]
        self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )

    def __a ( self ):
        # attention mask shape must match the padded batch, row sums give true lengths
        _lowercase : Union[str, Any] = self.feat_extract_dict
        _lowercase : Optional[Any] = True
        _lowercase : int = self.feature_extraction_class(**_lowerCAmelCase )
        _lowercase : str = self.feat_extract_tester.prepare_inputs_for_common()
        _lowercase : List[Any] = [len(_lowerCAmelCase ) for x in speech_inputs]
        _lowercase : Dict = feat_extract.model_input_names[0]
        _lowercase : Tuple = BatchFeature({input_name: speech_inputs} )
        _lowercase : Tuple = feat_extract.pad(_lowerCAmelCase , padding='longest' , return_tensors='np' )
        self.assertIn('attention_mask' , _lowerCAmelCase )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowerCAmelCase )

    def __a ( self ):
        # with truncation to the shortest input, every mask row sums to max_length
        _lowercase : Dict = self.feat_extract_dict
        _lowercase : int = True
        _lowercase : Tuple = self.feature_extraction_class(**_lowerCAmelCase )
        _lowercase : Tuple = self.feat_extract_tester.prepare_inputs_for_common()
        _lowercase : Optional[Any] = [len(_lowerCAmelCase ) for x in speech_inputs]
        _lowercase : int = feat_extract.model_input_names[0]
        _lowercase : Tuple = BatchFeature({input_name: speech_inputs} )
        _lowercase : Optional[Any] = min(_lowerCAmelCase )
        _lowercase : Tuple = feat_extract.pad(
            _lowerCAmelCase , padding='max_length' , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors='np' )
        self.assertIn('attention_mask' , _lowerCAmelCase )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 677 |
import math
def __magic_name__(array, start=0, end=0) -> list:
    """In-place insertion sort of array[start:end]; returns the (mutated) list.

    ``end == 0`` means "to the end of the list". The original signature declared
    three parameters under one mangled name (a SyntaxError) and its locals collided.
    """
    end = end or len(array)
    for i in range(start, end):
        insert_index = i
        insert_value = array[i]
        # Shift larger elements right until the insertion point is found.
        while insert_index != start and insert_value < array[insert_index - 1]:
            array[insert_index] = array[insert_index - 1]
            insert_index -= 1
        array[insert_index] = insert_value
    return array


# Name used by the introsort driver in this module.
insertion_sort = __magic_name__
def __magic_name__(array, index, heap_size) -> None:  # Max Heap
    """Sift array[index] down so the subtree rooted there satisfies the max-heap property.

    The original declared three parameters under one mangled name (a SyntaxError)
    and its locals collided.
    """
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[largest], array[index] = array[index], array[largest]
        heapify(array, largest, heap_size)


# Binding for the recursive call above and for external callers.
heapify = __magic_name__
def __magic_name__(array) -> list:
    """In-place max-heap sort; returns the (mutated) list.

    Self-contained: the private ``_sift_down`` replaces the original's reference to
    a ``heapify`` name that was not resolvable in this module.
    """
    def _sift_down(index, heap_size):
        # Restore the max-heap property for the subtree rooted at `index`.
        while True:
            largest = index
            left_index = 2 * index + 1
            right_index = 2 * index + 2
            if left_index < heap_size and array[largest] < array[left_index]:
                largest = left_index
            if right_index < heap_size and array[largest] < array[right_index]:
                largest = right_index
            if largest == index:
                return
            array[largest], array[index] = array[index], array[largest]
            index = largest

    n = len(array)
    # Build the max heap bottom-up.
    for i in range(n // 2, -1, -1):
        _sift_down(i, n)
    # Repeatedly move the maximum to the end and re-heapify the prefix.
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        _sift_down(0, i)
    return array


# Name used by the introsort driver when the depth limit is reached.
heap_sort = __magic_name__
def __magic_name__(array, first_index, middle_index, last_index) -> int:
    """Return the median of the three sampled array values (introsort pivot selection).

    The original declared four parameters under one mangled name (a SyntaxError)
    while the body already read the descriptive names restored here.
    """
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


# Name used by the introsort driver to pick a pivot.
median_of_a = __magic_name__
def __magic_name__(array, low, high, pivot) -> int:
    """Hoare-style partition of array[low:high] around ``pivot``; returns the split index.

    The original declared four parameters under one mangled name (a SyntaxError)
    while the body already read ``low``/``high``/``pivot``.
    """
    i = low
    j = high
    while True:
        # advance i past elements already on the low side
        while array[i] < pivot:
            i += 1
        j -= 1
        # retreat j past elements already on the high side
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


# Name used by the introsort driver.
partition = __magic_name__
def __magic_name__(array) -> list:
    """Introsort entry point: sorts `array` in place and returns it.

    Fixes the mangled ``math.loga`` call — introsort's recursion-depth bound is
    ``2 * ceil(log2(n))``.
    """
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


# Name used by this module's `__main__` guard.
sort = __magic_name__
def __magic_name__(array, start, end, size_threshold, max_depth) -> list:
    """Recursive introsort core over array[start:end].

    Quicksorts with a median-of-three pivot, falls back to heap sort once
    ``max_depth`` is exhausted, and finishes small partitions with insertion sort.
    The original declared five parameters under one mangled name (a SyntaxError)
    and bound pivot/partition results to colliding locals.
    """
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_a(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        # Recurse on the right partition; the loop continues on the left one.
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


# Binding for the recursive call above and for the `sort` entry point.
intro_sort = __magic_name__
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Read a comma-separated list of numbers and print them sorted with introsort.
    # (The original bound both intermediate values to one mangled name while the
    # following lines read `user_input` and `unsorted`.)
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( _a ):
    """Processor wrapping a Chinese-CLIP image processor and a BERT tokenizer.

    NOTE(review): the three class attributes below were all mangled to one name
    (upstream these are `attributes`, `image_processor_class`, `tokenizer_class`),
    so later assignments overwrite earlier ones — confirm against the upstream file.
    """

    _SCREAMING_SNAKE_CASE = ["""image_processor""", """tokenizer"""]
    _SCREAMING_SNAKE_CASE = """ChineseCLIPImageProcessor"""
    _SCREAMING_SNAKE_CASE = ("""BertTokenizer""", """BertTokenizerFast""")

    def __init__( self : Optional[int] , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : str=None , **UpperCamelCase__ : Tuple ):
        """Validate and store the image processor and tokenizer."""
        # NOTE(review): the body reads `kwargs`, `image_processor`, `feature_extractor`
        # and `tokenizer`, but the parameters above were mangled to `UpperCamelCase__`
        # — confirm the intended parameter names before relying on this signature.
        UpperCamelCase = None
        # Accept the deprecated `feature_extractor` kwarg as an alias for `image_processor`.
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , UpperCamelCase__ , )
            UpperCamelCase = kwargs.pop('feature_extractor' )
        UpperCamelCase = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(UpperCamelCase__ , UpperCamelCase__ )
        UpperCamelCase = self.image_processor

    def __call__( self : int , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : Optional[Any] ):
        """Tokenize text and/or preprocess images, returning a combined encoding."""
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            UpperCamelCase = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
        if images is not None:
            UpperCamelCase = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
        if text is not None and images is not None:
            # NOTE(review): this looks like it should store the pixel values on the
            # encoding (e.g. `encoding["pixel_values"] = image_features.pixel_values`)
            # — the assignment target appears to have been lost; confirm upstream.
            UpperCamelCase = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )

    def A ( self : Tuple , *UpperCamelCase__ : Any , **UpperCamelCase__ : Dict ):
        """Forward everything to the underlying tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )

    def A ( self : List[Any] , *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[Any] ):
        """Forward everything to the underlying tokenizer's `decode`."""
        return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )

    @property
    def A ( self : Optional[int] ):
        """Deduplicated, order-preserving union of tokenizer and image-processor input names."""
        UpperCamelCase = self.tokenizer.model_input_names
        UpperCamelCase = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def A ( self : Any ):
        """Deprecated alias kept for backwards compatibility."""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , UpperCamelCase__ , )
        return self.image_processor_class
| 430 |
'''simple docstring'''
def __lowerCamelCase ( arr , required_sum ) -> bool:
    """Return True when some subset of `arr` sums to exactly `required_sum`.

    Classic subset-sum dynamic programme: ``subset[i][j]`` is True when a subset of
    the first ``i`` elements sums to ``j``. The original signature declared two
    parameters under one mangled name (a SyntaxError) and bound every local to a
    single colliding name.
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for j in range(1, required_sum + 1):
        subset[0][j] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                # element too large for this target: inherit the answer without it
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                # either skip the element or take it and solve the reduced target
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
    # Run the module doctests when executed as a script.
    import doctest

    doctest.testmod()
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
# The class body below reads `logger`, but both module constants were bound to the
# single mangled name `lowercase__` (the archive map overwrote the logger). Bind the
# logger under the name actually used, keeping the mangled bindings unchanged.
logger = logging.get_logger(__name__)
lowercase__ = logger

lowercase__ = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class A_ ( _snake_case ):
    """Configuration for DPT (dense prediction transformer) models.

    NOTE(review): every `__init__` parameter was mangled to `lowercase_` and every
    assignment target to `UpperCAmelCase`, while the body reads the descriptive
    names (hidden_size, is_hybrid, backbone_config, ...) — confirm the intended
    signature against the upstream DPTConfig before relying on it.
    """

    UpperCAmelCase_ : Optional[Any] = """dpt"""

    def __init__( self : Optional[int] , lowercase_ : Union[str, Any]=768 , lowercase_ : Tuple=12 , lowercase_ : List[str]=12 , lowercase_ : Tuple=3_072 , lowercase_ : int="gelu" , lowercase_ : Optional[int]=0.0 , lowercase_ : str=0.0 , lowercase_ : List[Any]=0.02 , lowercase_ : Optional[int]=1E-12 , lowercase_ : Any=384 , lowercase_ : List[Any]=16 , lowercase_ : Any=3 , lowercase_ : Dict=False , lowercase_ : Union[str, Any]=True , lowercase_ : str=[2, 5, 8, 11] , lowercase_ : Union[str, Any]="project" , lowercase_ : Tuple=[4, 2, 1, 0.5] , lowercase_ : Dict=[96, 192, 384, 768] , lowercase_ : List[Any]=256 , lowercase_ : List[Any]=-1 , lowercase_ : Dict=False , lowercase_ : List[str]=True , lowercase_ : List[str]=0.4 , lowercase_ : List[Any]=255 , lowercase_ : Dict=0.1 , lowercase_ : Optional[Any]=[1, 1_024, 24, 24] , lowercase_ : List[Any]=[0, 1] , lowercase_ : Optional[int]=None , **lowercase_ : Any , ) -> List[str]:
        super().__init__(**lowercase_ )
        UpperCAmelCase : List[Any] = hidden_size
        UpperCAmelCase : Optional[int] = is_hybrid
        if self.is_hybrid:
            # Hybrid mode uses a convolutional (BiT) backbone before the transformer.
            if backbone_config is None:
                logger.info('Initializing the config with a `BiT` backbone.' )
                # Default BiT backbone used by hybrid DPT checkpoints.
                UpperCAmelCase : str = {
                    'global_padding': 'same',
                    'layer_type': 'bottleneck',
                    'depths': [3, 4, 9],
                    'out_features': ['stage1', 'stage2', 'stage3'],
                    'embedding_dynamic_padding': True,
                }
                UpperCAmelCase : Tuple = BitConfig(**lowercase_ )
            elif isinstance(lowercase_ , lowercase_ ):
                # NOTE(review): presumably `isinstance(backbone_config, dict)` upstream —
                # the isinstance arguments were mangled; verify before relying on this branch.
                logger.info('Initializing the config with a `BiT` backbone.' )
                UpperCAmelCase : Optional[int] = BitConfig(**lowercase_ )
            elif isinstance(lowercase_ , lowercase_ ):
                # NOTE(review): presumably `isinstance(backbone_config, PretrainedConfig)` upstream.
                UpperCAmelCase : Optional[int] = backbone_config
            else:
                raise ValueError(
                    f"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
            UpperCAmelCase : int = backbone_featmap_shape
            UpperCAmelCase : str = neck_ignore_stages
            if readout_type != "project":
                raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' )
        else:
            # Pure ViT mode: no backbone-related settings.
            UpperCAmelCase : int = None
            UpperCAmelCase : Optional[int] = None
            UpperCAmelCase : int = []
        UpperCAmelCase : Tuple = num_hidden_layers
        UpperCAmelCase : List[Any] = num_attention_heads
        UpperCAmelCase : str = intermediate_size
        UpperCAmelCase : Tuple = hidden_act
        UpperCAmelCase : Any = hidden_dropout_prob
        UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
        UpperCAmelCase : Tuple = initializer_range
        UpperCAmelCase : int = layer_norm_eps
        UpperCAmelCase : int = image_size
        UpperCAmelCase : str = patch_size
        UpperCAmelCase : Tuple = num_channels
        UpperCAmelCase : Union[str, Any] = qkv_bias
        UpperCAmelCase : Union[str, Any] = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' )
        UpperCAmelCase : int = readout_type
        UpperCAmelCase : Union[str, Any] = reassemble_factors
        UpperCAmelCase : Any = neck_hidden_sizes
        UpperCAmelCase : Dict = fusion_hidden_size
        UpperCAmelCase : List[Any] = head_in_index
        UpperCAmelCase : Dict = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        UpperCAmelCase : List[Any] = use_auxiliary_head
        UpperCAmelCase : str = auxiliary_loss_weight
        UpperCAmelCase : Optional[Any] = semantic_loss_ignore_index
        UpperCAmelCase : Optional[int] = semantic_classifier_dropout

    def UpperCAmelCase_ ( self : str ) -> int:
        # Serialize to a plain dict, expanding the nested backbone config and
        # recording the concrete model type.
        UpperCAmelCase : int = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            UpperCAmelCase : Tuple = self.backbone_config.to_dict()
        UpperCAmelCase : List[str] = self.__class__.model_type
        return output
| 695 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
lowercase__ = {
"gpt-neox-20b": 2048,
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase_ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , lowercase_ : Any=None , lowercase_ : Dict=None , lowercase_ : List[str]=None , lowercase_ : List[Any]="<|endoftext|>" , lowercase_ : List[str]="<|endoftext|>" , lowercase_ : Any="<|endoftext|>" , lowercase_ : List[str]=False , **lowercase_ : Union[str, Any] , ) -> str:
super().__init__(
lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )
UpperCAmelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowercase_ ) != add_prefix_space:
UpperCAmelCase : Tuple = getattr(lowercase_ , pre_tok_state.pop('type' ) )
UpperCAmelCase : Optional[Any] = add_prefix_space
UpperCAmelCase : Tuple = pre_tok_class(**lowercase_ )
UpperCAmelCase : Any = add_prefix_space
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase : Optional[int] = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
return tuple(lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : "Conversation" ) -> List[int]:
UpperCAmelCase : List[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowercase_ , add_special_tokens=lowercase_ ) + [self.eos_token_id] )
if len(lowercase_ ) > self.model_max_length:
UpperCAmelCase : int = input_ids[-self.model_max_length :]
return input_ids
| 695 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCamelCase ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
_SCREAMING_SNAKE_CASE : str = "A painting of a squirrel eating a burger"
_SCREAMING_SNAKE_CASE : Any = jax.device_count()
_SCREAMING_SNAKE_CASE : Optional[Any] = num_samples * [prompt]
_SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.prepare_inputs(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : List[str] = replicate(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Optional[Any] = shard(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.PRNGKey(0 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(__SCREAMING_SNAKE_CASE , jax.device_count() )
_SCREAMING_SNAKE_CASE : Any = sd_pipe(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_inference_steps=25 , jit=__SCREAMING_SNAKE_CASE )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_SCREAMING_SNAKE_CASE : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_SCREAMING_SNAKE_CASE : List[Any] = images[0, 253:256, 253:256, -1]
_SCREAMING_SNAKE_CASE : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_SCREAMING_SNAKE_CASE : Tuple = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = "stabilityai/stable-diffusion-2"
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(__SCREAMING_SNAKE_CASE , subfolder="scheduler" )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , revision="bf16" , dtype=jnp.bfloataa , )
_SCREAMING_SNAKE_CASE : str = scheduler_params
_SCREAMING_SNAKE_CASE : Optional[Any] = "A painting of a squirrel eating a burger"
_SCREAMING_SNAKE_CASE : int = jax.device_count()
_SCREAMING_SNAKE_CASE : Optional[Any] = num_samples * [prompt]
_SCREAMING_SNAKE_CASE : str = sd_pipe.prepare_inputs(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Dict = replicate(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : Any = shard(__SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE : int = jax.random.PRNGKey(0 )
_SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(__SCREAMING_SNAKE_CASE , jax.device_count() )
_SCREAMING_SNAKE_CASE : Any = sd_pipe(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_inference_steps=25 , jit=__SCREAMING_SNAKE_CASE )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_SCREAMING_SNAKE_CASE : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_SCREAMING_SNAKE_CASE : List[str] = images[0, 253:256, 253:256, -1]
_SCREAMING_SNAKE_CASE : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_SCREAMING_SNAKE_CASE : List[str] = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 572 |
"""simple docstring"""
from itertools import product
def __magic_name__ ( UpperCamelCase : int , UpperCamelCase : int ) -> list[int]:
a__ = sides_number
a__ = max_face_number * dice_number
a__ = [0] * (max_total + 1)
a__ = 1
a__ = range(UpperCamelCase , max_face_number + 1 )
for dice_numbers in product(UpperCamelCase , repeat=UpperCamelCase ):
a__ = sum(UpperCamelCase )
totals_frequencies[total] += 1
return totals_frequencies
def __magic_name__ ( ) -> float:
a__ = total_frequency_distribution(
sides_number=4 , dice_number=9 )
a__ = total_frequency_distribution(
sides_number=6 , dice_number=6 )
a__ = 0
a__ = 9
a__ = 4 * 9
a__ = 6
for peter_total in range(UpperCamelCase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
a__ = (4**9) * (6**6)
a__ = peter_wins_count / total_games_number
a__ = round(UpperCamelCase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
| 273 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
def __init__( self : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Any=32 , UpperCamelCase_ : Any=3 , UpperCamelCase_ : Dict=10 , UpperCamelCase_ : List[Any]=[8, 16, 32, 64] , UpperCamelCase_ : List[Any]=[1, 1, 2, 1] , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Tuple="relu" , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Optional[int]=["stage2", "stage3", "stage4"] , UpperCamelCase_ : Optional[Any]=[2, 3, 4] , UpperCamelCase_ : Tuple=1 , ) -> Any:
"""simple docstring"""
lowerCamelCase_ : int = parent
lowerCamelCase_ : str = batch_size
lowerCamelCase_ : Any = image_size
lowerCamelCase_ : Optional[int] = num_channels
lowerCamelCase_ : str = embeddings_size
lowerCamelCase_ : int = hidden_sizes
lowerCamelCase_ : Tuple = depths
lowerCamelCase_ : Optional[Any] = is_training
lowerCamelCase_ : List[Any] = use_labels
lowerCamelCase_ : Tuple = hidden_act
lowerCamelCase_ : Optional[Any] = num_labels
lowerCamelCase_ : Any = scope
lowerCamelCase_ : str = len(UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] = out_features
lowerCamelCase_ : Tuple = out_indices
lowerCamelCase_ : List[Any] = num_groups
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ : List[Any] = None
if self.use_labels:
lowerCamelCase_ : str = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __UpperCamelCase ( self : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ : Optional[int] = BitModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCamelCase_ : Optional[int] = model(UpperCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self : List[str] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : str ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = self.num_labels
lowerCamelCase_ : Union[str, Any] = BitForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCamelCase_ : List[Any] = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ : Dict = BitBackbone(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCamelCase_ : int = model(UpperCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase_ : int = None
lowerCamelCase_ : Union[str, Any] = BitBackbone(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCamelCase_ : List[str] = model(UpperCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ : str = self.prepare_config_and_inputs()
lowerCamelCase_ : Union[str, Any] = config_and_inputs
lowerCamelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( _lowerCAmelCase ,_lowerCAmelCase ,unittest.TestCase ):
A = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
A = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
A = False
A = False
A = False
A = False
A = False
def __UpperCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = BitModelTester(self )
lowerCamelCase_ : Optional[int] = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ )
def __UpperCamelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Tuple = model_class(UpperCamelCase_ )
lowerCamelCase_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ : List[Any] = [*signature.parameters.keys()]
lowerCamelCase_ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def __UpperCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def __UpperCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase_ )
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Tuple = model_class(config=UpperCamelCase_ )
for name, module in model.named_modules():
if isinstance(UpperCamelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def __UpperCamelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Tuple ):
lowerCamelCase_ : Optional[Any] = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
lowerCamelCase_ : Any = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCamelCase_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase_ : List[str] = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase_ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ : Tuple = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCamelCase_ : Union[str, Any] = layer_type
lowerCamelCase_ : Tuple = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ : Optional[int] = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
@slow
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ : str = BitModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def __snake_case ():
"""simple docstring"""
lowerCamelCase_ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : Any = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCamelCase_ )
lowerCamelCase_ : Optional[int] = self.default_image_processor
lowerCamelCase_ : List[Any] = prepare_img()
lowerCamelCase_ : List[Any] = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
lowerCamelCase_ : Dict = model(**UpperCamelCase_ )
# verify the logits
lowerCamelCase_ : Optional[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
lowerCamelCase_ : Optional[Any] = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4 ) )
@require_torch
class lowerCAmelCase__ ( _lowerCAmelCase ,unittest.TestCase ):
A = (BitBackbone,) if is_torch_available() else ()
A = BitConfig
A = False
def __UpperCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = BitModelTester(self )
| 721 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ : Tuple = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
lowerCamelCase_ : str = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
# The dog is cute and lives in the garden house
lowerCamelCase_ : Dict = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
lowerCamelCase_ : Union[str, Any] = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowerCamelCase_ : str = model(UpperCamelCase_ )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , UpperCamelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1e-3 ) )
@slow
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ : List[Any] = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
lowerCamelCase_ : Union[str, Any] = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
# The dog is cute and lives in the garden house
lowerCamelCase_ : Tuple = torch.Size((1, 12, 1_024) ) # batch_size, sequence_length, embedding_vector_dim
lowerCamelCase_ : Optional[int] = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowerCamelCase_ : str = model(UpperCamelCase_ )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , UpperCamelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCamelCase_ , atol=1e-3 ) )
| 418 | 0 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _a ( UpperCAmelCase ) -> list[list[float]]:
"""simple docstring"""
lowerCamelCase__ : List[str] = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(UpperCAmelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
lowerCamelCase__ : Tuple = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creates a copy of the matrix with swapped positions of the elements
lowerCamelCase__ : Dict = [[0.0, 0.0], [0.0, 0.0]]
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = matrix[1][1], matrix[0][0]
lowerCamelCase__ , lowerCamelCase__ : List[str] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(UpperCAmelCase ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(UpperCAmelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
lowerCamelCase__ : int = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creating cofactor matrix
lowerCamelCase__ : Optional[Any] = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
lowerCamelCase__ : Optional[Any] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
lowerCamelCase__ : Dict = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
lowerCamelCase__ : Dict = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
lowerCamelCase__ : int = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
lowerCamelCase__ : List[Any] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
lowerCamelCase__ : List[str] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
lowerCamelCase__ : List[str] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
lowerCamelCase__ : Optional[Any] = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
lowerCamelCase__ : Union[str, Any] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
lowerCamelCase__ : List[str] = array(UpperCAmelCase )
for i in range(3 ):
for j in range(3 ):
lowerCamelCase__ : Optional[Any] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
lowerCamelCase__ : List[str] = array(UpperCAmelCase )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(UpperCAmelCase )
# Calculate the inverse of the matrix
return [[float(d(UpperCAmelCase ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
| 315 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """Test suite for ``DDPMScheduler``, built on the shared ``SchedulerCommonTest``
    harness (which supplies ``check_over_configs``, ``check_over_forward``,
    ``dummy_model`` and ``dummy_sample_deter``).

    The original bodies assigned every local to a throwaway name
    (``lowerCamelCase__``) while reading the intended name (``config``,
    ``scheduler``, ...), raising ``NameError`` at runtime; the bindings below
    restore the names each statement actually reads.
    """

    # Scheduler classes exercised by the common harness.
    _UpperCAmelCase : Optional[Any] = (DDPMScheduler,)

    def __lowerCamelCase ( self : Optional[int] , **A : List[str] ) ->Union[str, Any]:
        """Return the default DDPM scheduler config, with ``**A`` overriding entries."""
        config = {
            "num_train_timesteps": 1_0_0_0,
            "beta_start": 0.00_01,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**A )
        return config

    def __lowerCamelCase ( self : Any ) ->Optional[int]:
        """Configs with various training-timestep counts must round-trip."""
        for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def __lowerCamelCase ( self : str ) ->List[str]:
        """Paired beta_start/beta_end settings must round-trip."""
        for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )

    def __lowerCamelCase ( self : Union[str, Any] ) ->str:
        """Both supported beta schedules must round-trip."""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )

    def __lowerCamelCase ( self : str ) ->Dict:
        """All variance types must round-trip."""
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )

    def __lowerCamelCase ( self : Dict ) ->str:
        """Both clip_sample settings must round-trip."""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )

    def __lowerCamelCase ( self : Tuple ) ->Optional[int]:
        """Thresholding off, then every threshold/prediction-type combination."""
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )

    def __lowerCamelCase ( self : Optional[Any] ) ->str:
        """All prediction types must round-trip."""
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )

    def __lowerCamelCase ( self : List[str] ) ->Optional[Any]:
        """Forward pass is checked at representative timesteps."""
        for t in [0, 5_0_0, 9_9_9]:
            self.check_over_forward(time_step=t )

    def __lowerCamelCase ( self : List[Any] ) ->Optional[int]:
        """The posterior variance matches known reference values."""
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_09_79 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5

    def __lowerCamelCase ( self : Union[str, Any] ) ->Optional[int]:
        """Full reverse-diffusion loop (default "epsilon" prediction); the final
        sample statistics are regression-checked against reference values."""
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_58.96_06 ) < 1e-2
        assert abs(result_mean.item() - 0.33_72 ) < 1e-3

    def __lowerCamelCase ( self : List[Any] ) ->Union[str, Any]:
        """Same reverse-diffusion loop with "v_prediction"; different reference values."""
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config(prediction_type="v_prediction" )
        scheduler = scheduler_class(**config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 2_02.02_96 ) < 1e-2
        assert abs(result_mean.item() - 0.26_31 ) < 1e-3

    def __lowerCamelCase ( self : int ) ->Tuple:
        """``previous_timestep`` follows a user-supplied custom timestep schedule
        (next entry in the list, or -1 after the last one)."""
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config )
        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )

    def __lowerCamelCase ( self : str ) ->Optional[int]:
        """Non-descending custom timesteps are rejected."""
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config )
        timesteps = [1_0_0, 8_7, 5_0, 5_1, 0]
        with self.assertRaises(ValueError , msg="`custom_timesteps` must be in descending order." ):
            scheduler.set_timesteps(timesteps=timesteps )

    def __lowerCamelCase ( self : Any ) ->str:
        """``num_inference_steps`` and custom ``timesteps`` are mutually exclusive."""
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config )
        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )

    def __lowerCamelCase ( self : List[Any] ) ->Dict:
        """Custom timesteps must start below ``num_train_timesteps``."""
        scheduler_class = self.scheduler_classes[0]
        config = self.get_scheduler_config()
        scheduler = scheduler_class(**config )
        timesteps = [scheduler.config.num_train_timesteps]
        # NOTE(review): this msg string (kept verbatim) looks like it was meant
        # to be an f-string — confirm upstream intent before changing it.
        with self.assertRaises(
            ValueError , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
            scheduler.set_timesteps(timesteps=timesteps )
| 315 | 1 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
# SentencePiece fixture used to build the unit-test tokenizer.
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

# mBART-50 language-code token ids used by the integration tests below.
# (Previously all three constants above were assigned to the same mangled
# name, leaving EN_CODE/RO_CODE undefined for the tests that read them.)
EN_CODE = 2_5_0_0_0_4  # "en_XX"
RO_CODE = 2_5_0_0_2_0  # "ro_RO"
__UpperCAmelCase = RO_CODE  # final value of the old mangled name, kept for safety
@require_sentencepiece
@require_tokenizers
class A__ ( A , unittest.TestCase ):
    """Unit tests for the mBART-50 tokenizer (slow and fast implementations).

    NOTE(review): the four class attributes below were all mangled to the same
    name `_lowercase`, so only the last assignment survives at class-creation
    time — presumably tokenizer_class / rust_tokenizer_class / test flags;
    confirm against the original test module. Many method bodies also assign
    results to `_lowerCAmelCase` but read them back under other names
    (e.g. `tokenizer`, `__UpperCamelCase`) — these references do not resolve
    as written; verify before relying on this file.
    """

    _lowercase : str = MBartaaTokenizer
    _lowercase : List[str] = MBartaaTokenizerFast
    _lowercase : Optional[Any] = True
    _lowercase : str = True

    def __magic_name__ ( self : Tuple ):
        """Build a tokenizer from the SentencePiece fixture and save it to a temp dir."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        _lowerCAmelCase : Union[str, Any] = MBartaaTokenizer(__UpperCamelCase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__UpperCamelCase )
        tokenizer.save_pretrained(self.tmpdirname )

    def __magic_name__ ( self : str ):
        """`<s>` maps to id 0 and back (token<->id conversion round-trips)."""
        _lowerCAmelCase : Union[str, Any] = "<s>"
        _lowerCAmelCase : int = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )

    def __magic_name__ ( self : Optional[int] ):
        """Vocabulary starts with the special tokens and has the expected size."""
        _lowerCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-1] , "<mask>" )
        self.assertEqual(len(__UpperCamelCase ) , 1_0_5_4 )

    def __magic_name__ ( self : Optional[Any] ):
        """Reported vocab_size matches the fixture vocabulary."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_5_4 )

    def __magic_name__ ( self : str ):
        """Full tokenize -> ids -> tokens round trip, including unknown pieces."""
        _lowerCAmelCase : Optional[int] = MBartaaTokenizer(__UpperCamelCase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__UpperCamelCase )
        _lowerCAmelCase : Any = tokenizer.tokenize("This is a test" )
        self.assertListEqual(__UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        _lowerCAmelCase : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            __UpperCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
        _lowerCAmelCase : str = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
        self.assertListEqual(
            __UpperCamelCase , [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
            ] , )
        # Ids that fall outside the vocab must decode back to "<unk>".
        _lowerCAmelCase : Any = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
        self.assertListEqual(
            __UpperCamelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )

    @slow
    def __magic_name__ ( self : Optional[int] ):
        """Integration check against a pinned facebook/mbart-large-50 revision."""
        # fmt: off
        _lowerCAmelCase : Union[str, Any] = {"input_ids": [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCamelCase , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , )

    def __magic_name__ ( self : Dict ):
        """save_pretrained round-trips between slow and fast tokenizers (three on-disk formats)."""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        _lowerCAmelCase : str = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                _lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
                _lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
                _lowerCAmelCase : List[str] = tempfile.mkdtemp()
                _lowerCAmelCase : Dict = tokenizer_r.save_pretrained(__UpperCamelCase )
                _lowerCAmelCase : List[Any] = tokenizer_p.save_pretrained(__UpperCamelCase )

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                _lowerCAmelCase : Optional[int] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
                self.assertSequenceEqual(__UpperCamelCase , __UpperCamelCase )

                # Checks everything loads correctly in the same way
                _lowerCAmelCase : List[Any] = tokenizer_r.from_pretrained(__UpperCamelCase )
                _lowerCAmelCase : str = tokenizer_p.from_pretrained(__UpperCamelCase )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(__UpperCamelCase )

                # Save tokenizer rust, legacy_format=True
                _lowerCAmelCase : Dict = tempfile.mkdtemp()
                _lowerCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(__UpperCamelCase , legacy_format=__UpperCamelCase )
                _lowerCAmelCase : List[str] = tokenizer_p.save_pretrained(__UpperCamelCase )

                # Checks it save with the same files
                self.assertSequenceEqual(__UpperCamelCase , __UpperCamelCase )

                # Checks everything loads correctly in the same way
                _lowerCAmelCase : Dict = tokenizer_r.from_pretrained(__UpperCamelCase )
                _lowerCAmelCase : Any = tokenizer_p.from_pretrained(__UpperCamelCase )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) )

                shutil.rmtree(__UpperCamelCase )

                # Save tokenizer rust, legacy_format=False
                _lowerCAmelCase : Optional[int] = tempfile.mkdtemp()
                _lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(__UpperCamelCase , legacy_format=__UpperCamelCase )
                _lowerCAmelCase : List[Any] = tokenizer_p.save_pretrained(__UpperCamelCase )

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )

                # Checks everything loads correctly in the same way
                _lowerCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(__UpperCamelCase )
                _lowerCAmelCase : Union[str, Any] = tokenizer_p.from_pretrained(__UpperCamelCase )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) )

                shutil.rmtree(__UpperCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
    """Slow integration tests for the pretrained one-to-many mBART-50 checkpoint.

    NOTE(review): the class attributes below were all mangled to `_lowercase`
    (presumably checkpoint_name / src_text / tgt_text / expected_src_tokens),
    so only the last assignment survives; later references such as
    `cls.checkpoint_name` and `self.src_text` will not resolve as written —
    confirm against the original test file. Method bodies show the same
    assign-to-one-name/read-under-another pattern.
    """

    _lowercase : str = '''facebook/mbart-large-50-one-to-many-mmt'''
    _lowercase : List[str] = [
        ''' UN Chief Says There Is No Military Solution in Syria''',
        ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
    ]
    _lowercase : List[Any] = [
        '''Şeful ONU declară că nu există o soluţie militară în Siria''',
        '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
        ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
        ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
    ]
    _lowercase : Any = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]

    @classmethod
    def __magic_name__ ( cls : Optional[Any] ):
        """Load the pretrained tokenizer once for the whole test class."""
        _lowerCAmelCase : Any = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" )
        _lowerCAmelCase : Union[str, Any] = 1
        return cls

    def __magic_name__ ( self : Union[str, Any] ):
        """Language-code tokens map to their fixed fairseq ids."""
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 2_5_0_0_0_1 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 2_5_0_0_0_4 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 2_5_0_0_2_0 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 2_5_0_0_3_8 )

    def __magic_name__ ( self : Optional[Any] ):
        """Encoding the English source text yields the expected token ids."""
        _lowerCAmelCase : Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , __UpperCamelCase )

    def __magic_name__ ( self : Tuple ):
        """Decoding with skip_special_tokens drops the language code and EOS."""
        self.assertIn(__UpperCamelCase , self.tokenizer.all_special_ids )
        _lowerCAmelCase : Union[str, Any] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
        _lowerCAmelCase : Any = self.tokenizer.decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
        _lowerCAmelCase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__UpperCamelCase )
        self.assertEqual(__UpperCamelCase , __UpperCamelCase )
        self.assertNotIn(self.tokenizer.eos_token , __UpperCamelCase )

    def __magic_name__ ( self : Any ):
        """Truncation keeps the language code first and EOS (id 2) last."""
        _lowerCAmelCase : Union[str, Any] = ["this is gunna be a long sentence " * 2_0]
        assert isinstance(src_text[0] , __UpperCamelCase )
        _lowerCAmelCase : Optional[int] = 1_0
        _lowerCAmelCase : Dict = self.tokenizer(__UpperCamelCase , max_length=__UpperCamelCase , truncation=__UpperCamelCase ).input_ids[0]
        self.assertEqual(ids[0] , __UpperCamelCase )
        self.assertEqual(ids[-1] , 2 )
        self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )

    def __magic_name__ ( self : Any ):
        """Mask token and language codes convert to their reserved ids."""
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [2_5_0_0_5_3, 2_5_0_0_0_1] )

    def __magic_name__ ( self : Dict ):
        """fairseq id mapping survives a save/load round trip."""
        _lowerCAmelCase : Tuple = tempfile.mkdtemp()
        _lowerCAmelCase : Any = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(__UpperCamelCase )
        _lowerCAmelCase : str = MBartaaTokenizer.from_pretrained(__UpperCamelCase )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __UpperCamelCase )

    @require_torch
    def __magic_name__ ( self : Optional[Any] ):
        """Batch layout matches fairseq: language code first, EOS last."""
        _lowerCAmelCase : Any = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__UpperCamelCase , return_tensors="pt" )
        _lowerCAmelCase : Any = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def __magic_name__ ( self : Optional[int] ):
        """Padded/truncated batch has the expected shapes and prefix/suffix tokens."""
        _lowerCAmelCase : List[Any] = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
        _lowerCAmelCase : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )

        self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )

        self.assertEqual((2, 1_4) , batch.input_ids.shape )
        self.assertEqual((2, 1_4) , batch.attention_mask.shape )
        _lowerCAmelCase : Optional[int] = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , __UpperCamelCase )
        self.assertEqual(2 , batch.decoder_input_ids[0, 0] )  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )

    def __magic_name__ ( self : Union[str, Any] ):
        """Source and target texts can be tokenized with different max lengths."""
        _lowerCAmelCase : int = self.tokenizer(self.src_text , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=3 , return_tensors="pt" )
        _lowerCAmelCase : str = self.tokenizer(
            text_target=self.tgt_text , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=1_0 , return_tensors="pt" )
        _lowerCAmelCase : Tuple = targets["input_ids"]
        _lowerCAmelCase : Dict = shift_tokens_right(__UpperCamelCase , self.tokenizer.pad_token_id )

        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )

    @require_torch
    def __magic_name__ ( self : Union[str, Any] ):
        """`_build_translation_inputs` sets the forced BOS id to the target language."""
        _lowerCAmelCase : Dict = self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )

        self.assertEqual(
            nested_simplify(__UpperCamelCase ) , {
                # en_XX, A, test, EOS
                "input_ids": [[2_5_0_0_0_4, 6_2, 3_0_3_4, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 2_5_0_0_0_1,
            } , )
| 715 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make torch/cuDNN ops deterministic so pipeline outputs are reproducible.
enable_full_determinism()
class A__ ( A , unittest.TestCase ):
    """CPU unit tests for `KandinskyVaaControlnetImgaImgPipeline` built from
    tiny, seeded dummy components.

    NOTE(review): the class attributes below were all mangled to `_lowercase`
    (presumably pipeline_class / params / batch_params / callback params /
    a test flag), so only the last assignment survives — confirm against the
    original test file. Likewise all methods share the name `__magic_name__`,
    so attribute references such as `self.time_input_dim` or
    `self.dummy_unet` no longer resolve to the mangled properties, and
    several locals are assigned to `_lowerCAmelCase` but read back under
    other names (`model`, `unet`, `pipe`, ...).
    """

    _lowercase : List[str] = KandinskyVaaControlnetImgaImgPipeline
    _lowercase : Tuple = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    _lowercase : str = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    _lowercase : Optional[Any] = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    _lowercase : Tuple = False

    @property
    def __magic_name__ ( self : Any ):
        """Dummy embedding size used by the tiny test components."""
        return 3_2

    @property
    def __magic_name__ ( self : Tuple ):
        """Dummy time-embedding input dimension."""
        return 3_2

    @property
    def __magic_name__ ( self : List[Any] ):
        """Mirrors the time-embedding input dim (presumably cross-attention dim — TODO confirm)."""
        return self.time_input_dim

    @property
    def __magic_name__ ( self : Optional[Any] ):
        """Time-embedding projection dimension (4x the input dim)."""
        return self.time_input_dim * 4

    @property
    def __magic_name__ ( self : Any ):
        """Dummy size constant (presumably an embedding count — TODO confirm)."""
        return 1_0_0

    @property
    def __magic_name__ ( self : Any ):
        """Build a tiny seeded `UNet2DConditionModel` with image+hint conditioning."""
        torch.manual_seed(0 )

        _lowerCAmelCase : Tuple = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        _lowerCAmelCase : Optional[int] = UNetaDConditionModel(**A_ )
        return model

    @property
    def __magic_name__ ( self : Dict ):
        """Keyword arguments for a tiny VQ ("movq") autoencoder."""
        return {
            "block_out_channels": [3_2, 3_2, 6_4, 6_4],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 1_2,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def __magic_name__ ( self : Optional[int] ):
        """Seeded tiny `VQModel` instance."""
        torch.manual_seed(0 )
        _lowerCAmelCase : List[str] = VQModel(**self.dummy_movq_kwargs )
        return model

    def __magic_name__ ( self : int ):
        """Assemble unet + DDIM scheduler + movq into the pipeline component dict."""
        _lowerCAmelCase : Tuple = self.dummy_unet
        _lowerCAmelCase : List[Any] = self.dummy_movq

        _lowerCAmelCase : Tuple = {
            "num_train_timesteps": 1_0_0_0,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        _lowerCAmelCase : int = DDIMScheduler(**A_ )

        _lowerCAmelCase : Tuple = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def __magic_name__ ( self : Union[str, Any] , A_ : Union[str, Any] , A_ : Optional[int]=0 ):
        """Deterministic dummy inputs (embeddings, init image, hint, generator)."""
        _lowerCAmelCase : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ )
        _lowerCAmelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            A_ )
        # create init_image
        _lowerCAmelCase : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(A_ ) ).to(A_ )
        _lowerCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _lowerCAmelCase : Tuple = Image.fromarray(np.uinta(A_ ) ).convert("RGB" ).resize((2_5_6, 2_5_6) )
        # create hint
        _lowerCAmelCase : Union[str, Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(A_ ) ).to(A_ )

        # MPS does not support device-specific generators.
        if str(A_ ).startswith("mps" ):
            _lowerCAmelCase : Tuple = torch.manual_seed(A_ )
        else:
            _lowerCAmelCase : Any = torch.Generator(device=A_ ).manual_seed(A_ )
        _lowerCAmelCase : Union[str, Any] = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 6_4,
            "width": 6_4,
            "num_inference_steps": 1_0,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def __magic_name__ ( self : Optional[int] ):
        """Pipeline produces the expected 64x64 image slice on CPU (dict and tuple outputs agree)."""
        _lowerCAmelCase : Union[str, Any] = "cpu"

        _lowerCAmelCase : Tuple = self.get_dummy_components()

        _lowerCAmelCase : Optional[int] = self.pipeline_class(**A_ )
        _lowerCAmelCase : Optional[Any] = pipe.to(A_ )

        pipe.set_progress_bar_config(disable=A_ )

        _lowerCAmelCase : List[str] = pipe(**self.get_dummy_inputs(A_ ) )
        _lowerCAmelCase : List[Any] = output.images

        _lowerCAmelCase : str = pipe(
            **self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]

        _lowerCAmelCase : int = image[0, -3:, -3:, -1]
        _lowerCAmelCase : List[str] = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 6_4, 6_4, 3)

        _lowerCAmelCase : List[Any] = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] )

        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    """GPU integration test: full Kandinsky 2.2 prior + ControlNet-depth
    image-to-image run compared against a stored reference image."""

    def __magic_name__ ( self : Any ):
        """Release GPU memory after a test.

        NOTE(review): calls `super().tearDown()` but the method itself is not
        named `tearDown`, so unittest will not invoke it automatically —
        confirm against the original file.
        """
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __magic_name__ ( self : List[Any] ):
        """Run prior + controlnet img2img end to end and compare to the reference output."""
        _lowerCAmelCase : List[str] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" )

        _lowerCAmelCase : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        _lowerCAmelCase : Union[str, Any] = init_image.resize((5_1_2, 5_1_2) )

        # Depth hint image, scaled to [0, 1] and laid out as NCHW.
        _lowerCAmelCase : Dict = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png" )
        _lowerCAmelCase : Tuple = torch.from_numpy(np.array(A_ ) ).float() / 255.0
        _lowerCAmelCase : Optional[int] = hint.permute(2 , 0 , 1 ).unsqueeze(0 )

        _lowerCAmelCase : List[str] = "A robot, 4k photo"

        _lowerCAmelCase : Dict = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
        pipe_prior.to(A_ )

        _lowerCAmelCase : str = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa )
        _lowerCAmelCase : Optional[int] = pipeline.to(A_ )

        pipeline.set_progress_bar_config(disable=A_ )

        # CPU generator keeps the run reproducible across GPUs.
        _lowerCAmelCase : Dict = torch.Generator(device="cpu" ).manual_seed(0 )

        _lowerCAmelCase , _lowerCAmelCase : Tuple = pipe_prior(
            A_ , image=A_ , strength=0.85 , generator=A_ , negative_prompt="" , ).to_tuple()

        _lowerCAmelCase : List[Any] = pipeline(
            image=A_ , image_embeds=A_ , negative_image_embeds=A_ , hint=A_ , generator=A_ , num_inference_steps=1_0_0 , height=5_1_2 , width=5_1_2 , strength=0.5 , output_type="np" , )

        _lowerCAmelCase : str = output.images[0]

        assert image.shape == (5_1_2, 5_1_2, 3)

        assert_mean_pixel_difference(A_ , A_ )
| 503 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module-level logger for configuration info/warning messages.
# (Previously both assignments below shared the same mangled name, clobbering
# the logger that the config class uses.)
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config file.
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class __a ( lowerCAmelCase__ ):
    """Configuration class for the DETA object-detection model.

    Stores every hyper-parameter needed to instantiate a DETA model and
    inherits serialization from the PretrainedConfig-style base class.
    Reconstructed: the original block declared all 37 ``__init__`` parameters
    under one duplicated name (a SyntaxError) and assigned every value to a
    throwaway local instead of ``self``.
    """

    model_type = "deta"
    # Map generic config attribute names onto DETA-specific ones.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,  # nested config (or dict) for the CNN backbone
        num_queries=900,  # number of object queries
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,  # accepted for API compatibility; not stored
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'] )
        else:
            # Allow passing a plain dict and rebuild the proper config class.
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.' )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Generic alias (see `attribute_map`) for `encoder_attention_heads`."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Generic alias (see `attribute_map`) for `d_model`."""
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output

    # Backward-compatible alias for the previous (mangled) public name.
    snake_case_ = to_dict
| 650 |
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
# Browser-like request headers; Google rejects requests without a real User-Agent.
A_ : Union[str, Any] ={
    """User-Agent""": """Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
    """ (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"""
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Scrape Google Images for `query` and download up to `max_images` results.

    Images are written to ``query_<query>/original_size_img_<i>.jpg``.
    Returns the loop index reached (0 when no result data could be parsed).

    NOTE: relies on undocumented Google result markup and is brittle by nature.
    """
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        'q': query,
        'tbm': 'isch',
        'hl': 'en',
        'ijn': '0',
    }

    html = requests.get('https://www.google.com/search', params=params, headers=A_)
    soup = BeautifulSoup(html.text, 'html.parser')

    # Google embeds the image metadata in inline AF_initDataCallback scripts.
    matched_images_data = ''.join(
        re.findall(r'AF_initDataCallback\(([^<]+)\);', str(soup.select('script'))))

    matched_google_image_data = re.findall(
        r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",',
        matched_images_data,
    )
    if not matched_google_image_data:
        return 0

    # Drop the low-resolution thumbnail entries so only full-size URLs remain.
    removed_matched_google_images_thumbnails = re.sub(
        r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]',
        '',
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]',
        removed_matched_google_images_thumbnails,
    )

    # Install a browser-like opener once (hoisted out of the download loop).
    opener = urllib.request.build_opener()
    opener.addheaders = [
        (
            'User-Agent',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
            ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
        )
    ]
    urllib.request.install_opener(opener)

    path_name = f'query_{query.replace(" ", "_")}'
    if not os.path.exists(path_name):
        os.makedirs(path_name)

    index = 0  # guard against an empty URL list
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        # The URLs are double unicode-escaped in the page source.
        original_size_img_not_fixed = bytes(fixed_full_res_image, 'ascii').decode(
            'unicode-escape')
        original_size_img = bytes(original_size_img_not_fixed, 'ascii').decode(
            'unicode-escape')
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f'{path_name}/original_size_img_{index}.jpg')
    return index


# Backward-compatible alias for the previous (mangled) public name.
SCREAMING_SNAKE_CASE_ = download_images_from_google_query
return index
if __name__ == "__main__":
    try:
        # sys.argv[1] is the search term; missing argument raises IndexError.
        image_count = SCREAMING_SNAKE_CASE_(sys.argv[1])
        print(f'{image_count} images were downloaded to disk.')
    except IndexError:
        print("""Please provide a search term.""")
        raise
| 650 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Lazily-imported public API of the roberta_prelayernorm package.
# (Reconstructed: the original assigned every structure to the same mangled
# name, leaving `_import_structure` undefined for `_LazyModule` below.)
_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

# PyTorch models are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

# TensorFlow models are only exposed when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

# Flax models are only exposed when JAX/Flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports so type checkers see the full API.
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712 |
def __UpperCAmelCase(number: int, shift_amount: int) -> str:
    """Logically left-shift *number* by *shift_amount* bits.

    Returns the result as a "0b"-prefixed binary string.

    Raises:
        ValueError: if either argument is negative.
    """
    # Bug fix: the previous signature declared two parameters with the same
    # name (a SyntaxError) while the body referenced the undefined names
    # ``number`` and ``shift_amount``; the signature now matches the body.
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers')
    # bin() already yields the "0b" prefix; appending zeros is the shift.
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number
def __UpperCAmelCase(number: int, shift_amount: int) -> str:
    """Logically right-shift *number* by *shift_amount* bits.

    Returns the result as a "0b"-prefixed binary string; shifting out every
    bit yields "0b0".

    Raises:
        ValueError: if either argument is negative.
    """
    # Bug fix: restored the parameter names the body actually uses (the
    # previous signature declared two parameters both named ``a_``).
    if number < 0 or shift_amount < 0:
        raise ValueError('both inputs must be positive integers')
    binary_number = str(bin(number))[2:]  # strip the "0b" prefix
    if shift_amount >= len(binary_number):
        return "0b0"  # every significant bit was shifted out
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number
def __UpperCAmelCase(number: int, shift_amount: int) -> str:
    """Arithmetically right-shift *number* by *shift_amount* bits.

    The number is rendered as a two's-complement bit string with an explicit
    sign bit, then shifted with sign extension (the sign bit is replicated
    into the vacated positions). Returns a "0b"-prefixed string.
    """
    # Bug fix: restored the parameter names the body actually uses (the
    # previous signature declared two parameters both named ``a_``).
    if number >= 0:  # Get binary representation of positive number
        binary_number = '0' + str(bin(number)).strip('-')[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            '1' + '0' * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        # Shifting past the width leaves only replicated sign bits.
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 607 | 0 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
# Module-level logger.
A : Optional[int] = logging.get_logger(__name__)
# Config classes that support question answering, and their model-type names.
A : Optional[Any] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
# NOTE(review): ``MODEL_CONFIG_CLASSES`` is never defined above (the previous
# line also assigns to ``A``) — looks like automated-renaming damage; the
# upstream file binds these to two distinct names. Confirm before use.
A : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _UpperCamelCase :
    """Command-line arguments controlling SQuAD data loading and featurization.

    NOTE(review): every field below is declared under the same obfuscated
    name ``__UpperCAmelCase``, so only the last declaration survives as a
    dataclass field — renaming damage; the downstream code reads fields such
    as ``model_type``, ``data_dir``, ``max_seq_length``, ``doc_stride``,
    ``max_query_length``, ``overwrite_cache``, ``version_2_with_negative``,
    ``lang_id`` and ``threads``. Confirm against the upstream file.
    """

    # Model type (must be one of the QA-capable architectures).
    __UpperCAmelCase : str =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Model type selected in the list: """ + """, """.join(lowerCAmelCase__ )} )
    # Directory containing the SQuAD .json files.
    __UpperCAmelCase : str =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
    # Maximum total input length after tokenization.
    __UpperCAmelCase : int =field(
        default=1_2_8 ,metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } ,)
    # Stride between successive chunks of a long document.
    __UpperCAmelCase : int =field(
        default=1_2_8 ,metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} ,)
    # Maximum number of question tokens.
    __UpperCAmelCase : int =field(
        default=6_4 ,metadata={
            """help""": (
                """The maximum number of tokens for the question. Questions longer than this will """
                """be truncated to this length."""
            )
        } ,)
    # Maximum generated answer length (start/end predicted independently).
    __UpperCAmelCase : int =field(
        default=3_0 ,metadata={
            """help""": (
                """The maximum length of an answer that can be generated. This is needed because the start """
                """and end predictions are not conditioned on one another."""
            )
        } ,)
    __UpperCAmelCase : bool =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    # SQuAD v2 adds unanswerable questions.
    __UpperCAmelCase : bool =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
    __UpperCAmelCase : float =field(
        default=0.0 ,metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    __UpperCAmelCase : int =field(
        default=2_0 ,metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    # Language id for language-specific XLM models.
    __UpperCAmelCase : int =field(
        default=0 ,metadata={
            """help""": (
                """language id of input for language-specific xlm models (see"""
                """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
            )
        } ,)
    __UpperCAmelCase : int =field(default=1 ,metadata={"""help""": """multiple threads for converting example to features"""} )
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Dataset split identifiers ("train" / "dev") used by the SQuAD dataset."""

    # NOTE(review): both members share the obfuscated name ``__UpperCAmelCase``;
    # upstream these are ``train`` and ``dev`` enum members.
    __UpperCAmelCase : List[str] ="""train"""
    __UpperCAmelCase : Optional[Any] ="""dev"""
class _UpperCamelCase ( lowerCAmelCase__ ):
    """PyTorch ``Dataset`` serving tokenized SQuAD features.

    Loads examples from ``args.data_dir``, converts them to features with
    ``squad_convert_examples_to_features``, and caches the result on disk so
    subsequent runs skip featurization. ``__getitem__`` assembles the tensor
    dict expected by QA models.
    """

    # NOTE(review): these four class attributes all carry the same obfuscated
    # name; upstream they are ``args``, ``features``, ``mode``,
    # ``is_language_sensitive``.
    __UpperCAmelCase : SquadDataTrainingArguments
    __UpperCAmelCase : List[SquadFeatures]
    __UpperCAmelCase : Split
    __UpperCAmelCase : bool

    # NOTE(review): every parameter below is named ``__a`` (a SyntaxError as
    # written) — renaming damage; upstream signature is
    # (args, tokenizer, limit_length=None, mode=Split.train,
    #  is_language_sensitive=False, cache_dir=None, dataset_format="pt").
    def __init__( self , __a , __a , __a = None , __a = Split.train , __a = False , __a = None , __a = "pt" , ):
        __lowerCAmelCase = args
        __lowerCAmelCase = is_language_sensitive
        # v2 processor handles unanswerable questions.
        __lowerCAmelCase = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(__a , __a ):
            try:
                __lowerCAmelCase = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name" )
        __lowerCAmelCase = mode
        # Load data features from cache or dataset file
        __lowerCAmelCase = "v2" if args.version_2_with_negative else "v1"
        __lowerCAmelCase = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        __lowerCAmelCase = cached_features_file + ".lock"
        with FileLock(__a ):
            if os.path.exists(__a ) and not args.overwrite_cache:
                __lowerCAmelCase = time.time()
                __lowerCAmelCase = torch.load(__a )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                __lowerCAmelCase = self.old_features["features"]
                __lowerCAmelCase = self.old_features.get("dataset" , __a )
                __lowerCAmelCase = self.old_features.get("examples" , __a )
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run" )
            else:
                if mode == Split.dev:
                    __lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
                else:
                    __lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
                __lowerCAmelCase , __lowerCAmelCase = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=__a , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__a , )
                __lowerCAmelCase = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples} , __a , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )

    def __len__( self ):
        # Number of featurized examples.
        return len(self.features )

    def __getitem__( self , __a ):
        # Convert to Tensors and build dataset
        __lowerCAmelCase = self.features[i]
        __lowerCAmelCase = torch.tensor(feature.input_ids , dtype=torch.long )
        __lowerCAmelCase = torch.tensor(feature.attention_mask , dtype=torch.long )
        __lowerCAmelCase = torch.tensor(feature.token_type_ids , dtype=torch.long )
        __lowerCAmelCase = torch.tensor(feature.cls_index , dtype=torch.long )
        __lowerCAmelCase = torch.tensor(feature.p_mask , dtype=torch.float )
        __lowerCAmelCase = torch.tensor(feature.is_impossible , dtype=torch.float )
        __lowerCAmelCase = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        # These model families do not use token type ids.
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        # XLNet/XLM additionally consume cls_index and p_mask.
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible} )
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
        # Supervision targets are only available for the train split.
        if self.mode == Split.train:
            __lowerCAmelCase = torch.tensor(feature.start_position , dtype=torch.long )
            __lowerCAmelCase = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
        return inputs
| 636 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
    """Unit tests for the activation-function registry in ``transformers.activations``.

    NOTE(review): local variables throughout were collapsed to a single
    obfuscated name (``__lowerCAmelCase``) and call sites reference undefined
    names such as ``__a`` and ``torch_builtin`` — renaming damage; the intent
    is reconstructed in the per-test comments below.
    """

    def snake_case ( self ):
        # gelu_python should match the torch built-in gelu but differ from gelu_new.
        __lowerCAmelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
        __lowerCAmelCase = get_activation("gelu" )
        self.assertTrue(torch.allclose(gelu_python(__a ) , torch_builtin(__a ) ) )
        self.assertFalse(torch.allclose(gelu_python(__a ) , gelu_new(__a ) ) )

    def snake_case ( self ):
        # gelu_10 should equal gelu below the clip point and saturate at 10.0.
        __lowerCAmelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] )
        __lowerCAmelCase = get_activation("gelu" )
        __lowerCAmelCase = get_activation("gelu_10" )
        __lowerCAmelCase = torch_builtin(__a )
        __lowerCAmelCase = geluaa(__a )
        __lowerCAmelCase = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 )
        self.assertTrue(torch.max(__a ).item() == 1_0.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )

    def snake_case ( self ):
        # Every registered activation name resolves; unknown names raise.
        get_activation("gelu" )
        get_activation("gelu_10" )
        get_activation("gelu_fast" )
        get_activation("gelu_new" )
        get_activation("gelu_python" )
        get_activation("gelu_pytorch_tanh" )
        get_activation("linear" )
        get_activation("mish" )
        get_activation("quick_gelu" )
        get_activation("relu" )
        get_activation("sigmoid" )
        get_activation("silu" )
        get_activation("swish" )
        get_activation("tanh" )
        with self.assertRaises(__a ):
            get_activation("bogus" )
        with self.assertRaises(__a ):
            get_activation(__a )

    def snake_case ( self ):
        # Attributes set on one returned activation must not leak into a
        # freshly fetched instance.
        __lowerCAmelCase = get_activation("gelu" )
        __lowerCAmelCase = 1
        __lowerCAmelCase = get_activation("gelu" )
        self.assertEqual(acta.a , 1 )
        with self.assertRaises(__a ):
            __lowerCAmelCase = acta.a
| 636 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class snake_case_ ( lowerCAmelCase ):
    """Config tester: verifies MobileNetV2-specific attributes exist on the config."""

    def __A ( self ):
        # Build a config from the tester inputs.
        config = self.config_class(**self.inputs_dict )
        # Bug fix: the previous code called ``hasattr`` on an undefined name
        # instead of the config instance created on the line above.
        self.parent.assertTrue(hasattr(config , 'tf_padding' ) )
        self.parent.assertTrue(hasattr(config , 'depth_multiplier' ) )
class snake_case_ :
    """Test helper that builds small MobileNetV2 configs/inputs and checks
    the output shapes of the base, classification and segmentation models.

    NOTE(review): the ``__init__`` below declares every parameter under the
    same obfuscated name ``__lowerCAmelCase`` (a SyntaxError as written);
    the body shows the intended names (parent, batch_size, num_channels,
    image_size, depth_multiplier, ...). Confirm against the upstream tester.
    """

    def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=3 , __lowerCAmelCase=32 , __lowerCAmelCase=0.25 , __lowerCAmelCase=8 , __lowerCAmelCase=8 , __lowerCAmelCase=6 , __lowerCAmelCase=32 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase="relu6" , __lowerCAmelCase=1_280 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.02 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=10 , __lowerCAmelCase=None , ):
        SCREAMING_SNAKE_CASE_ : Any = parent
        SCREAMING_SNAKE_CASE_ : Any = batch_size
        SCREAMING_SNAKE_CASE_ : Any = num_channels
        SCREAMING_SNAKE_CASE_ : int = image_size
        SCREAMING_SNAKE_CASE_ : Any = depth_multiplier
        SCREAMING_SNAKE_CASE_ : str = depth_divisible_by
        SCREAMING_SNAKE_CASE_ : Dict = min_depth
        SCREAMING_SNAKE_CASE_ : int = expand_ratio
        SCREAMING_SNAKE_CASE_ : Optional[int] = tf_padding
        SCREAMING_SNAKE_CASE_ : Dict = output_stride
        SCREAMING_SNAKE_CASE_ : Optional[Any] = first_layer_is_expansion
        SCREAMING_SNAKE_CASE_ : int = finegrained_output
        SCREAMING_SNAKE_CASE_ : str = hidden_act
        # Hidden size shrinks with the depth multiplier unless finegrained output is kept.
        SCREAMING_SNAKE_CASE_ : List[str] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
        SCREAMING_SNAKE_CASE_ : Tuple = classifier_dropout_prob
        SCREAMING_SNAKE_CASE_ : str = use_labels
        SCREAMING_SNAKE_CASE_ : List[Any] = is_training
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_labels
        SCREAMING_SNAKE_CASE_ : int = initializer_range
        SCREAMING_SNAKE_CASE_ : List[Any] = scope

    def __A ( self ):
        # Random pixel values plus (optionally) classification and per-pixel labels.
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = None
        SCREAMING_SNAKE_CASE_ : Optional[Any] = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE_ : Any = ids_tensor([self.batch_size] , self.num_labels )
            SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def __A ( self ):
        # Build a MobileNetV2 config from the tester's hyperparameters.
        return MobileNetVaConfig(
            num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )

    def __A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        # Base model: check last_hidden_state and pooler_output shapes.
        SCREAMING_SNAKE_CASE_ : int = MobileNetVaModel(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE_ : Optional[int] = model(__lowerCAmelCase )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        self.parent.assertEqual(
            result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )

    def __A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        # Image-classification head: logits shape (batch, num_labels).
        SCREAMING_SNAKE_CASE_ : Any = self.num_labels
        SCREAMING_SNAKE_CASE_ : int = MobileNetVaForImageClassification(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE_ : Optional[int] = model(__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
        # Semantic-segmentation head: logits shape (batch, num_labels, H/stride, W/stride).
        SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels
        SCREAMING_SNAKE_CASE_ : Any = MobileNetVaForSemanticSegmentation(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE_ : int = model(__lowerCAmelCase )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        SCREAMING_SNAKE_CASE_ : int = model(__lowerCAmelCase , labels=__lowerCAmelCase )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def __A ( self ):
        # Repackage prepared inputs into the common (config, inputs_dict) form.
        SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = config_and_inputs
        SCREAMING_SNAKE_CASE_ : Tuple = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class snake_case_ ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
    """Common ModelTester/Pipeline test-suite wiring for MobileNetV2.

    NOTE(review): class attributes below all share one obfuscated name;
    upstream they are ``all_model_classes``, ``pipeline_model_mapping`` and
    the ``test_*`` feature flags.
    """

    __lowerCamelCase : Any = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    __lowerCamelCase : Dict = (
        {
            'feature-extraction': MobileNetVaModel,
            'image-classification': MobileNetVaForImageClassification,
            'image-segmentation': MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    __lowerCamelCase : str = False
    __lowerCamelCase : Tuple = False
    __lowerCamelCase : str = False
    __lowerCamelCase : Union[str, Any] = False

    def __A ( self ):
        # NOTE(review): ``MobileNetVaModelTester`` / ``MobileNetVaConfigTester``
        # are not defined under those names in this file (the classes were
        # renamed to ``snake_case_``) — renaming damage.
        SCREAMING_SNAKE_CASE_ : Dict = MobileNetVaModelTester(self )
        SCREAMING_SNAKE_CASE_ : Any = MobileNetVaConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )

    def __A ( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
    def __A ( self ):
        pass

    @unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
    def __A ( self ):
        pass

    @unittest.skip(reason='MobileNetV2 does not output attentions' )
    def __A ( self ):
        pass

    def __A ( self ):
        # forward() must accept ``pixel_values`` as its first argument.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE_ : Any = model_class(__lowerCAmelCase )
            SCREAMING_SNAKE_CASE_ : List[str] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            SCREAMING_SNAKE_CASE_ : Optional[Any] = [*signature.parameters.keys()]
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = ['pixel_values']
            self.assertListEqual(arg_names[:1] , __lowerCAmelCase )

    def __A ( self ):
        SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )

    def __A ( self ):
        # MobileNetV2 exposes 16 hidden states regardless of head.
        def check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
            SCREAMING_SNAKE_CASE_ : Dict = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()
            with torch.no_grad():
                SCREAMING_SNAKE_CASE_ : List[Any] = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
            SCREAMING_SNAKE_CASE_ : List[str] = outputs.hidden_states
            SCREAMING_SNAKE_CASE_ : str = 16
            self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE_ : Tuple = True
            check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            SCREAMING_SNAKE_CASE_ : List[Any] = True
            check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )

    def __A ( self ):
        SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )

    def __A ( self ):
        SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase )

    @slow
    def __A ( self ):
        # Smoke-test loading the first published checkpoint.
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE_ : Dict = MobileNetVaModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
    """Load the COCO fixture image shared by the slow integration tests."""
    return Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
@require_torch
@require_vision
class snake_case_ ( unittest.TestCase ):
    """Slow integration tests pinning logits of published MobileNetV2 checkpoints."""

    @cached_property
    def __A ( self ):
        # Image processor for the classification checkpoint (None without vision deps).
        return (
            MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
        )

    @slow
    def __A ( self ):
        # Classification checkpoint: 1001 ImageNet classes, pinned first logits.
        SCREAMING_SNAKE_CASE_ : Optional[Any] = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Any = self.default_image_processor
        SCREAMING_SNAKE_CASE_ : Tuple = prepare_img()
        SCREAMING_SNAKE_CASE_ : str = image_processor(images=__lowerCAmelCase , return_tensors='pt' ).to(__lowerCAmelCase )
        # forward pass
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ : List[str] = model(**__lowerCAmelCase )
        # verify the logits
        SCREAMING_SNAKE_CASE_ : str = torch.Size((1, 1_001) )
        self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([0.24_45, -1.19_93, 0.19_05] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1e-4 ) )

    @slow
    def __A ( self ):
        # DeepLabV3+MobileNetV2 segmentation checkpoint: 21 PASCAL-VOC classes,
        # pinned 3x3x3 corner of the logits.
        SCREAMING_SNAKE_CASE_ : Tuple = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
        SCREAMING_SNAKE_CASE_ : Tuple = model.to(__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : List[str] = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
        SCREAMING_SNAKE_CASE_ : int = prepare_img()
        SCREAMING_SNAKE_CASE_ : str = image_processor(images=__lowerCAmelCase , return_tensors='pt' ).to(__lowerCAmelCase )
        # forward pass
        with torch.no_grad():
            SCREAMING_SNAKE_CASE_ : Optional[int] = model(**__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Tuple = outputs.logits
        # verify the logits
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size((1, 21, 65, 65) )
        self.assertEqual(logits.shape , __lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(
            [
                [[17.57_90, 17.75_81, 18.33_55], [18.32_57, 18.42_30, 18.89_73], [18.61_69, 18.86_50, 19.21_87]],
                [[-2.15_95, -2.09_77, -2.37_41], [-2.42_26, -2.30_28, -2.68_35], [-2.78_19, -2.59_91, -2.77_06]],
                [[4.20_58, 4.83_17, 4.76_38], [4.41_36, 5.03_61, 4.93_83], [4.50_28, 4.96_44, 4.87_34]],
            ] , device=__lowerCAmelCase , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
| 311 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
# Lazy-import scaffolding for the DPT model package.
# NOTE(review): upstream this dict is named ``_import_structure``; the
# ``_LazyModule`` call at the bottom references ``_import_structure``, which
# is never defined here — automated-renaming damage; confirm upstream.
lowerCAmelCase__: List[str] = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
# Vision-only symbols (feature extractor / image processor).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__: Optional[int] = ["DPTFeatureExtractor"]
    lowerCAmelCase__: Any = ["DPTImageProcessor"]
# Torch-only model symbols.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase__: Tuple = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]
# Eager imports for type checkers; lazy proxy at runtime.
if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )
else:
    import sys
    lowerCAmelCase__: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311 | 1 |
"""simple docstring"""
def lowercase__ ( lowerCamelCase : str ) -> str:
return " ".join(
"".join(word[::-1] ) if len(lowerCamelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Bug fix: this module defines the helper as ``lowercase__``; the previous
    # call referenced the undefined name ``reverse_long_words`` (NameError).
    print(lowercase__("Hey wollef sroirraw"))
| 308 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
__UpperCAmelCase = logging.get_logger(__name__)
@dataclass
class lowercase_ ( a_ ):
    """PyTorch-specific benchmark arguments.

    Translates deprecated ``no_*`` flags into their positive counterparts,
    and resolves the torch device (CPU / CUDA / TPU) lazily.
    """

    # Deprecated negative flags accepted for backwards compatibility.
    __magic_name__ : Union[str, Any] = [
        """no_inference""",
        """no_cuda""",
        """no_tpu""",
        """no_speed""",
        """no_memory""",
        """no_env_print""",
        """no_multi_process""",
    ]

    def __init__( self : List[str] , **_lowercase : Optional[int] ):
        # Map each deprecated ``no_x`` kwarg to the inverted positive flag.
        # NOTE(review): ``setattr(self, _lowercase, ...)`` uses the kwargs
        # parameter name rather than the stripped ``positive_arg`` computed on
        # the previous line — looks like renaming damage; upstream sets the
        # positive attribute. Confirm before relying on this path.
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                lowerCAmelCase__ : Optional[int] = deprecated_arg[3:]
                setattr(self , _lowercase , not kwargs.pop(_lowercase ) )
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}" )
        lowerCAmelCase__ : Any = kwargs.pop("torchscript" , self.torchscript )
        lowerCAmelCase__ : Optional[int] = kwargs.pop("torch_xla_tpu_print_metrics" , self.torch_xla_tpu_print_metrics )
        lowerCAmelCase__ : Optional[Any] = kwargs.pop("fp16_opt_level" , self.fpaa_opt_level )
        super().__init__(**_lowercase )

    # Whether to trace the model with torchscript.
    __magic_name__ : bool = field(default=a_ , metadata={"""help""": """Trace the models using torchscript"""} )
    # Whether to print XLA/TPU metrics.
    __magic_name__ : bool = field(default=a_ , metadata={"""help""": """Print Xla/PyTorch tpu metrics"""} )
    # Apex AMP optimization level for fp16 runs.
    __magic_name__ : str = field(
        default="""O1""" , metadata={
            """help""": (
                """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. """
                """See details at https://nvidia.github.io/apex/amp.html"""
            )
        } , )

    @cached_property
    def _lowerCAmelCase ( self : List[Any] ):
        # Resolve (device, n_gpu) once: CPU when CUDA disabled, XLA for TPU,
        # otherwise CUDA when available.
        requires_backends(self , ["torch"] )
        logger.info("PyTorch: setting up devices" )
        if not self.cuda:
            lowerCAmelCase__ : str = torch.device("cpu" )
            lowerCAmelCase__ : Optional[int] = 0
        elif is_torch_tpu_available():
            lowerCAmelCase__ : Any = xm.xla_device()
            lowerCAmelCase__ : Any = 0
        else:
            lowerCAmelCase__ : Tuple = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
            lowerCAmelCase__ : Optional[int] = torch.cuda.device_count()
        return device, n_gpu

    @property
    def _lowerCAmelCase ( self : int ):
        # True when running on TPU and TPU use is enabled.
        return is_torch_tpu_available() and self.tpu

    @property
    def _lowerCAmelCase ( self : Any ):
        requires_backends(self , ["torch"] )
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def _lowerCAmelCase ( self : str ):
        # The resolved torch device.
        requires_backends(self , ["torch"] )
        return self._setup_devices[0]

    @property
    def _lowerCAmelCase ( self : Union[str, Any] ):
        # Number of GPUs available to the benchmark.
        requires_backends(self , ["torch"] )
        return self._setup_devices[1]

    @property
    def _lowerCAmelCase ( self : Optional[Any] ):
        return self.n_gpu > 0
| 308 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
lowerCamelCase_ : Union[str, Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def UpperCAmelCase__ (
    model,
    model_args,
    output_path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export *model* to ONNX at *output_path* via ``torch.onnx.export``.

    Bug fix: the previous signature declared eight parameters all named
    ``_UpperCAmelCase`` (a SyntaxError); names were restored from the keyword
    call site below (``model_args=``, ``output_path=``, ``opset=``, ...).

    Args:
        model: the torch module to export.
        model_args: tuple of example inputs traced during export.
        output_path: ``pathlib.Path`` of the ``.onnx`` file to write.
        ordered_input_names / output_names: graph input/output names.
        dynamic_axes: per-input dynamic-dimension spec.
        opset: ONNX operator-set version.
        use_external_data_format: pre-1.11 flag for >2GB models.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def UpperCAmelCase__ (model_path, output_path, opset, fpaa=False):
    """Export the VAE decoder of a `diffusers` checkpoint to ONNX.

    Bug fix: the previous signature declared four parameters all named
    ``_UpperCAmelCase`` (a SyntaxError); names were restored from the body
    (``model_path`` was already referenced directly) and the CLI call site.

    Args:
        model_path: local directory or Hub id of the diffusers checkpoint.
        output_path: directory the ONNX model tree is written into.
        opset: ONNX operator-set version to target.
        fpaa: export with reduced precision (requires CUDA).
    """
    # NOTE(review): both branches read ``torch.floataa`` — upstream these are
    # float16 vs float32; the distinct dtype names were lost in automated
    # renaming. Confirm before relying on the fp16 path.
    dtype = torch.floataa if fpaa else torch.floataa
    if fpaa and torch.cuda.is_available():
        device = 'cuda'
    elif fpaa and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
    else:
        device = 'cpu'
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '/vae' )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part (fix: the previous code bound the
    # decode method to a throwaway local instead of attaching it as forward)
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / 'vae_decoder' / 'model.onnx',
        ordered_input_names=['latent_sample', 'return_dict'],
        output_names=['sample'],
        dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        },
        opset=opset,
    )
    del vae_decoder
# CLI entry point: parse arguments and run the conversion.
if __name__ == "__main__":
    # NOTE(review): the parser is bound to ``lowerCamelCase_`` but used as
    # ``parser``, and the conversion is invoked as ``convert_models`` while the
    # function above is named ``UpperCAmelCase__`` — renaming damage; confirm
    # against the upstream script.
    lowerCamelCase_ : List[Any] = argparse.ArgumentParser()
    parser.add_argument(
        '--model_path',
        type=str,
        required=True,
        help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
    )
    parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--opset',
        default=14,
        type=int,
        help='The version of the ONNX operator set to use.',
    )
    parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    lowerCamelCase_ : str = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('SD: Done: ONNX') | 716 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
# Module-level logger; later code in this module refers to it as `logger`.
logger = logging.get_logger(__name__)

# Maps each `model_type` to the name of its feature extractor class.  The rest of this
# module (feature_extractor_class_from_name, the _LazyAutoMapping below, and the
# AutoFeatureExtractor docstring decorator) looks this mapping up by this exact name.
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
        ('beit', 'BeitFeatureExtractor'),
        ('chinese_clip', 'ChineseCLIPFeatureExtractor'),
        ('clap', 'ClapFeatureExtractor'),
        ('clip', 'CLIPFeatureExtractor'),
        ('clipseg', 'ViTFeatureExtractor'),
        ('conditional_detr', 'ConditionalDetrFeatureExtractor'),
        ('convnext', 'ConvNextFeatureExtractor'),
        ('cvt', 'ConvNextFeatureExtractor'),
        ('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
        ('data2vec-vision', 'BeitFeatureExtractor'),
        ('deformable_detr', 'DeformableDetrFeatureExtractor'),
        ('deit', 'DeiTFeatureExtractor'),
        ('detr', 'DetrFeatureExtractor'),
        ('dinat', 'ViTFeatureExtractor'),
        ('donut-swin', 'DonutFeatureExtractor'),
        ('dpt', 'DPTFeatureExtractor'),
        ('encodec', 'EncodecFeatureExtractor'),
        ('flava', 'FlavaFeatureExtractor'),
        ('glpn', 'GLPNFeatureExtractor'),
        ('groupvit', 'CLIPFeatureExtractor'),
        ('hubert', 'Wav2Vec2FeatureExtractor'),
        ('imagegpt', 'ImageGPTFeatureExtractor'),
        ('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
        ('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
        ('levit', 'LevitFeatureExtractor'),
        ('maskformer', 'MaskFormerFeatureExtractor'),
        ('mctct', 'MCTCTFeatureExtractor'),
        ('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
        ('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
        ('mobilevit', 'MobileViTFeatureExtractor'),
        ('nat', 'ViTFeatureExtractor'),
        ('owlvit', 'OwlViTFeatureExtractor'),
        ('perceiver', 'PerceiverFeatureExtractor'),
        ('poolformer', 'PoolFormerFeatureExtractor'),
        ('regnet', 'ConvNextFeatureExtractor'),
        ('resnet', 'ConvNextFeatureExtractor'),
        ('segformer', 'SegformerFeatureExtractor'),
        ('sew', 'Wav2Vec2FeatureExtractor'),
        ('sew-d', 'Wav2Vec2FeatureExtractor'),
        ('speech_to_text', 'Speech2TextFeatureExtractor'),
        ('speecht5', 'SpeechT5FeatureExtractor'),
        ('swiftformer', 'ViTFeatureExtractor'),
        ('swin', 'ViTFeatureExtractor'),
        ('swinv2', 'ViTFeatureExtractor'),
        ('table-transformer', 'DetrFeatureExtractor'),
        ('timesformer', 'VideoMAEFeatureExtractor'),
        ('tvlt', 'TvltFeatureExtractor'),
        ('unispeech', 'Wav2Vec2FeatureExtractor'),
        ('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
        ('van', 'ConvNextFeatureExtractor'),
        ('videomae', 'VideoMAEFeatureExtractor'),
        ('vilt', 'ViltFeatureExtractor'),
        ('vit', 'ViTFeatureExtractor'),
        ('vit_mae', 'ViTFeatureExtractor'),
        ('vit_msn', 'ViTFeatureExtractor'),
        ('wav2vec2', 'Wav2Vec2FeatureExtractor'),
        ('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
        ('wavlm', 'Wav2Vec2FeatureExtractor'),
        ('whisper', 'WhisperFeatureExtractor'),
        ('xclip', 'CLIPFeatureExtractor'),
        ('yolos', 'YolosFeatureExtractor'),
    ]
)

# Lazy config-class -> feature-extractor-class mapping used by AutoFeatureExtractor;
# referenced below as FEATURE_EXTRACTOR_MAPPING (e.g. in register()).
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    """Resolve a feature extractor class from its class name.

    Searches, in order: the per-model-type name table, any extra classes registered on
    FEATURE_EXTRACTOR_MAPPING, and finally the main `transformers` module (which exposes
    dummy objects for classes whose optional dependencies are missing, so the caller gets
    an informative error instead of ``None``).

    Returns the class, or ``None`` if nothing matches.
    """
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            # Convert the model type (e.g. "data2vec-audio") to its module name.
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f""".{module_name}""", 'transformers.models')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, '__name__', None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Load the feature extractor configuration (as a dict) for a pretrained model.

    Downloads/locates the ``FEATURE_EXTRACTOR_NAME`` file from a local directory or the Hub
    via ``get_file_from_repo`` and returns its parsed JSON content.  Returns an empty dict
    when the file cannot be located (the caller then falls back to the model config).
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the feature extractor configuration file, will try to use the model config instead.' )
        return {}

    with open(resolved_config_file, encoding='utf-8') as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    """Generic factory that instantiates the correct feature extractor class for a checkpoint.

    This class cannot be instantiated directly; use :meth:`from_pretrained`.
    """

    def __init__(self):
        raise EnvironmentError(
            'AutoFeatureExtractor is designed to be instantiated '
            'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate one of the library's feature extractor classes from a checkpoint.

        Resolution order: the `feature_extractor_type` key of the feature extractor config,
        an `AutoFeatureExtractor` entry in its `auto_map` (remote code), the model config's
        attributes, and finally the config-class based FEATURE_EXTRACTOR_MAPPING.
        Raises ValueError when nothing matches.
        """
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', None)
        # Mark the call as coming from an Auto class so downstream loaders can adapt.
        kwargs['_from_auto'] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get('feature_extractor_type', None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get('auto_map', {}):
            feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, 'feature_extractor_type', None)
            if hasattr(config, 'auto_map') and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map['AutoFeatureExtractor']

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop('code_revision', None)
            if os.path.isdir(pretrained_model_name_or_path):
                # Local remote-code checkpoint: make the class auto-registerable.
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
            F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
            F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor class for a given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
def solution(n: int = 1_000) -> int:
    """Return the sum of all natural numbers below ``n`` that are multiples of 3 or 5.

    (Project Euler problem 1.)  The old ``a % 15`` branch was unreachable — any multiple
    of 15 is already a multiple of 3 — so it is dropped.
    """
    return sum(i for i in range(n) if i % 3 == 0 or i % 5 == 0)


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 99 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger; the two assignments previously shared one obfuscated name,
# so the logger binding was shadowed by the archive map.
logger = logging.get_logger(__name__)

# Maps checkpoint identifiers to their hosted config files.
CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    """Configuration class for a CANINE model.

    The original obfuscated ``__init__`` declared every parameter with the same name
    (a SyntaxError); parameter names are restored from the attribute assignments below.
    Defaults follow google/canine-s.
    """

    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16_384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=0,
        bos_token_id=0xe_000,
        eos_token_id=0xe_001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16_384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
| 501 | 0 |
"""Full-justify a string of words into lines of a fixed width."""


def text_justification(word: str, max_width: int) -> list:
    """Split ``word`` into fully-justified lines of exactly ``max_width`` characters.

    Words are greedily packed onto each line; extra spaces are distributed as evenly as
    possible between words, with leftover spaces assigned round-robin from the left.
    The final line is left-justified and padded with trailing spaces.

    >>> text_justification("This is an example of text justification.", 16)
    ['This    is    an', 'example  of text', 'justification.  ']
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 704 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    # Command-line entry point: convert an original ControlNet checkpoint to diffusers format.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    parser.add_argument(
        '--original_config_file',
        type=str,
        required=True,
        help='The YAML config file corresponding to the original architecture.',
    )
    parser.add_argument(
        '--num_in_channels',
        default=None,
        type=int,
        help='The number of input channels. If `None` number of input channels will be automatically inferred.',
    )
    parser.add_argument(
        '--image_size',
        default=512,
        type=int,
        help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
            ' Base. Use 768 for Stable Diffusion v2.'
        ),
    )
    parser.add_argument(
        '--extract_ema',
        action='store_true',
        help=(
            'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
            ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
            ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
        ),
    )
    parser.add_argument(
        '--upcast_attention',
        action='store_true',
        help=(
            'Whether the attention computation should always be upcasted. This is necessary when running stable'
            ' diffusion 2.1.'
        ),
    )
    parser.add_argument(
        '--from_safetensors',
        action='store_true',
        help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
    )
    parser.add_argument(
        '--to_safetensors',
        action='store_true',
        help='Whether to store pipeline in safetensors format or not.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')

    def parse_bool(string):
        """Parse a literal "True"/"False" string into a bool; reject anything else."""
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(F'''could not parse string as bool {string}''')

    parser.add_argument(
        '--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
    )
    parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)

    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )

    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 378 | 0 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
# Module-level logger (diffusers' logging wrapper); not referenced elsewhere in this view.
__magic_name__ : int = logging.get_logger(__name__)  # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    """CLIP vision encoder with a small mapper head used by the Paint-by-Example pipeline.

    Encodes example images into conditioning embeddings of size ``proj_size`` and exposes
    a learned unconditional embedding for classifier-free guidance scaling.
    """

    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        """Encode ``pixel_values``; optionally also return the unconditional embedding."""
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        # Add a sequence axis before the transformer mapper.
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states
class PaintByExampleMapper(nn.Module):
    """Stack of single-head transformer blocks mapping pooled CLIP features to conditioning states."""

    def __init__(self, config):
        super().__init__()
        # One block per ~5 encoder layers, as in the original Paint-by-Example head.
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="""gelu""", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        """Run ``hidden_states`` through each transformer block in sequence."""
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module-level logger; main() below refers to it as `logger`.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    Field names are restored from the ``data_args.*`` accesses in ``main()``; the original
    obfuscated version declared every field under the same name, keeping only the last one.
    """

    dataset_name: Optional[str] = field(
        default="""cifar10""", metadata={"""help""": """Name of a dataset from the datasets package"""}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"""help""": """The column name of the images in the files."""}
    )
    train_dir: Optional[str] = field(default=None, metadata={"""help""": """A folder containing the training data."""})
    validation_dir: Optional[str] = field(default=None, metadata={"""help""": """A folder containing the validation data."""})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"""help""": """Percent to split off of train for validation."""}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of training examples to this """
                """value if set."""
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            """help""": (
                """For debugging purposes or quicker training, truncate the number of evaluation examples to this """
                """value if set."""
            )
        },
    )

    def __post_init__(self):
        # Build the `data_files` mapping consumed by `load_dataset` in main().
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train.

    Field names are restored from the ``model_args.*`` accesses in ``main()``.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            """help""": (
                """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            """help""": (
                """Override some existing default config settings when a model is trained from scratch. Example: """
                """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""}
    )
    model_revision: str = field(
        default="""main""", metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""}
    )
    image_processor_name: str = field(default=None, metadata={"""help""": """Name or path of preprocessor config."""})
    use_auth_token: bool = field(
        default=False,
        metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        },
    )
    # mask_ratio / norm_pix_loss are pushed into the ViTMAE config in main().
    mask_ratio: float = field(
        default=0.75, metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"""help""": """Whether or not to train with normalized pixel values as target."""}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    """TrainingArguments extended with a batch-size-relative base learning rate.

    ``main()`` scales this into an absolute learning rate:
    absolute_lr = base_lr * total_batch_size / 256.
    """

    base_learning_rate: float = field(
        default=1e-3, metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""}
    )
def collate_fn(examples):
    """Collate a list of ``{"pixel_values": tensor}`` examples into one batched tensor dict."""
    pixel_values = torch.stack([example["""pixel_values"""] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    """Pre-train a ViTMAE model (masked autoencoding) on an image dataset."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(""".json"""):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_mae""", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", handlers=[logging.StreamHandler(sys.stdout)], )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
    logger.info(f"""Training/evaluation parameters {training_args}""")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                """Use --overwrite_output_dir to overcome.""" )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if """validation""" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["""train"""].train_test_split(data_args.train_val_split)
        ds["""train"""] = split["""train"""]
        ds["""validation"""] = split["""test"""]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("""You are instantiating a new config instance from scratch.""")
    if model_args.config_overrides is not None:
        logger.info(f"""Overriding config: {model_args.config_overrides}""")
        config.update_from_string(model_args.config_overrides)
        logger.info(f"""New config: {config}""")

    # adapt config
    config.update(
        {
            """mask_ratio""": model_args.mask_ratio,
            """norm_pix_loss""": model_args.norm_pix_loss,
        })

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(""".ckpt""" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info("""Training new model from scratch""")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["""train"""].column_names
    else:
        column_names = ds["""validation"""].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = """image"""
    elif "img" in column_names:
        image_column_name = """img"""
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["""shortest_edge"""]
    else:
        size = (image_processor.size["""height"""], image_processor.size["""width"""])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("""RGB""") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ])

    def preprocess_images(examples):
        """Apply the MAE augmentation pipeline to a batch of dataset examples."""
        examples["""pixel_values"""] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("""--do_train requires a train dataset""")
        if data_args.max_train_samples is not None:
            ds["""train"""] = ds["""train"""].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("""--do_eval requires a validation dataset""")
        if data_args.max_eval_samples is not None:
            ds["""validation"""] = (
                ds["""validation"""].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=ds["""train"""] if training_args.do_train else None, eval_dataset=ds["""validation"""] if training_args.do_eval else None, tokenizer=image_processor, data_collator=collate_fn, )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("""train""", train_result.metrics)
        trainer.save_metrics("""train""", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""", metrics)
        trainer.save_metrics("""eval""", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        """tasks""": """masked-auto-encoding""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""masked-auto-encoding"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    """Entry point for TPU multiprocessing via xla_spawn (index is the process index)."""
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger for the BEiT checkpoint-conversion code below.
lowerCAmelCase_ = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """Build the list of (old_name, new_name) pairs mapping original DiT
    checkpoint parameter names onto the HuggingFace BEiT naming scheme.

    Args:
        config: model config; only ``num_hidden_layers`` is read here.
        has_lm_head: True for masked-image-modeling checkpoints (mask token +
            final layernorm), False for classification checkpoints (pooler +
            classifier head).
        is_semantic: True when the original keys live under a ``backbone.``
            prefix (semantic-segmentation checkpoints).

    Returns:
        list[tuple[str, str]]: rename pairs, one per parameter to move.
    """
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """Split each layer's fused qkv projection into separate query/key/value
    weights and rename the layer-scale parameters, mutating ``state_dict``
    in place.

    The original checkpoint stores one ``attn.qkv.weight`` of shape
    (3 * hidden_size, hidden_size) plus separate q/v biases (DiT/BEiT have no
    key bias). Target key names follow the upstream DiT conversion script —
    TODO(review): confirm against the BEiT model's parameter names.
    """
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        # first hidden_size rows -> query, middle -> key, last -> value
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_a = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_b = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_a
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_b
def rename_key(dct, old, new):
    """Move the value stored under key ``old`` to key ``new`` in ``dct`` (in place)."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO validation image (two cats) used as a
    sanity-check input for the converted model.

    Returns:
        PIL.Image.Image: the decoded image.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True so PIL can read directly from the response body
    # (presumably; upstream conversion scripts use this pattern — confirm)
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak a DiT checkpoint's weights into the HuggingFace BEiT
    structure, verify the forward pass shape, and save (optionally push) the
    converted model + image processor.

    Args:
        checkpoint_url: URL of the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
        push_to_hub: if True, also push model and processor to the Hub.
    """
    # rvlcdip checkpoints are fine-tuned classifiers (no LM head); all others
    # are masked-image-modeling pretraining checkpoints.
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    # NOTE(review): boolean config values follow the upstream DiT conversion
    # script (absolute position embeddings; mask token only with LM head).
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits: class logits for rvlcdip, patch logits over the codebook otherwise
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_url',
        default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
        type=str,
        help='URL to the original PyTorch checkpoint (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 716 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    # SentencePiece is optional; without it there is no slow tokenizer class.
    AlbertTokenizer = None

logger = logging.get_logger(__name__)
# Expected on-disk file names for the slow (SentencePiece) and fast vocabularies.
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

# Download URLs for each pretrained ALBERT checkpoint's vocabulary files.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
        'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
        'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
        'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
        'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
        'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
        'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
        'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
        'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
        'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
        'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
        'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
        'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
        'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
        'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
    },
}

# Maximum sequence length each checkpoint was trained with.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'albert-base-v1': 512,
    'albert-large-v1': 512,
    'albert-xlarge-v1': 512,
    'albert-xxlarge-v1': 512,
    'albert-base-v2': 512,
    'albert-large-v2': 512,
    'albert-xlarge-v2': 512,
    'albert-xxlarge-v2': 512,
}

# SentencePiece's word-boundary marker character (U+2581).
SPIECE_UNDERLINE = '▁'
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" ALBERT tokenizer backed by HuggingFace's *tokenizers*
    library, mirroring the SentencePiece-based slow tokenizer.

    NOTE(review): class and attribute names restored from the surrounding
    constants and imports (PreTrainedTokenizerFast / AlbertTokenizer) —
    confirm against the canonical transformers module.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Slow counterpart used when converting / saving the SentencePiece vocab.
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # A slow tokenizer can only be re-created if the SentencePiece file is known.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs by adding special tokens: ``[CLS] X [SEP]`` for a
        single sequence, ``[CLS] A [SEP] B [SEP]`` for a pair.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a_pair + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create token-type ids: 0 for the first sequence (and its special
        tokens), 1 for the optional second sequence plus its trailing [SEP].
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_pair + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """
        Copy the SentencePiece model file into ``save_directory`` and return
        its path; raises ValueError when no slow vocabulary file is available.
        """
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )

        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 110 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all prime numbers <= ``num`` via the sieve of Eratosthenes.

    Args:
        num: upper bound (inclusive); must be a positive integer.

    Returns:
        Ascending list of primes up to ``num``.

    Raises:
        ValueError: if ``num`` is not positive.
    """
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start]:
            prime.append(start)
            # Mark multiples of start as composite. Stepping by `start` (not by
            # `num`, which was the bug) starting from start*start — smaller
            # multiples were already crossed out by smaller primes.
            for i in range(start * start, num + 1, start):
                sieve[i] = False
        start += 1

    # Everything above sqrt(num) still marked True is prime.
    for j in range(end + 1, num + 1):
        if sieve[j]:
            prime.append(j)

    return prime
if __name__ == "__main__":
    # Interactive entry point: read an upper bound from stdin and print every
    # prime up to it.
    # NOTE(review): the sieve function above is defined under a different name
    # (`UpperCamelCase`), so `prime_sieve` is unresolved here — confirm the
    # intended function name.
    print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 264 |
"""simple docstring"""
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedy activity selection: print the indices of a maximum-size set of
    mutually non-overlapping activities.

    Args:
        start: start times of the activities.
        finish: finish times of the activities, sorted in ascending order
            (the greedy choice is only optimal under this precondition).
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Demo data: activity j runs over [start[j], finish[j]); `finish` is
    # already sorted ascending, as the greedy selection requires.
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
| 264 | 1 |
UpperCamelCase_ = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 322 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
UpperCamelCase_ = HfApi()
UpperCamelCase_ = {}
# fmt: off
UpperCamelCase_ = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
UpperCamelCase_ = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
UpperCamelCase_ = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
UpperCamelCase_ = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
UpperCamelCase_ = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
UpperCamelCase_ = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
UpperCamelCase_ = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
UpperCamelCase_ = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
UpperCamelCase_ = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
UpperCamelCase_ = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
UpperCamelCase_ = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
UpperCamelCase_ = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
UpperCamelCase_ = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
UpperCamelCase_ = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
UpperCamelCase_ = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
UpperCamelCase_ = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
UpperCamelCase_ = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F"Started running {mod.modelId}!!!")
if mod.modelId.startswith('''CompVis'''):
UpperCamelCase_ = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
UpperCamelCase_ = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
UpperCamelCase_ = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
UpperCamelCase_ = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
UpperCamelCase_ = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(F"{mod.modelId} has passed successfully!!!")
| 322 | 1 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = 42
class __a( _a , _a ):
"""simple docstring"""
@register_to_config
def __init__( self ,_SCREAMING_SNAKE_CASE = 32 ,_SCREAMING_SNAKE_CASE = 64 ,_SCREAMING_SNAKE_CASE = 20 ,_SCREAMING_SNAKE_CASE = 768 ,_SCREAMING_SNAKE_CASE=77 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE = 0.0 ,_SCREAMING_SNAKE_CASE = "silu" ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = "linear" ,_SCREAMING_SNAKE_CASE = "prd" ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,) -> List[str]:
super().__init__()
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : Dict = attention_head_dim
UpperCAmelCase_ : Tuple = num_attention_heads * attention_head_dim
UpperCAmelCase_ : Dict = additional_embeddings
UpperCAmelCase_ : int = time_embed_dim or inner_dim
UpperCAmelCase_ : List[Any] = embedding_proj_dim or embedding_dim
UpperCAmelCase_ : List[str] = clip_embed_dim or embedding_dim
UpperCAmelCase_ : Union[str, Any] = Timesteps(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,0 )
UpperCAmelCase_ : Any = TimestepEmbedding(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,out_dim=_SCREAMING_SNAKE_CASE ,act_fn=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = nn.Linear(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if embedding_proj_norm_type is None:
UpperCAmelCase_ : List[str] = None
elif embedding_proj_norm_type == "layer":
UpperCAmelCase_ : int = nn.LayerNorm(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
UpperCAmelCase_ : List[Any] = nn.Linear(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if encoder_hid_proj_type is None:
UpperCAmelCase_ : List[Any] = None
elif encoder_hid_proj_type == "linear":
UpperCAmelCase_ : List[Any] = nn.Linear(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
UpperCAmelCase_ : Union[str, Any] = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,_SCREAMING_SNAKE_CASE ) )
if added_emb_type == "prd":
UpperCAmelCase_ : Dict = nn.Parameter(torch.zeros(1 ,1 ,_SCREAMING_SNAKE_CASE ) )
elif added_emb_type is None:
UpperCAmelCase_ : Dict = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
UpperCAmelCase_ : List[str] = nn.ModuleList(
[
BasicTransformerBlock(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,dropout=_SCREAMING_SNAKE_CASE ,activation_fn='''gelu''' ,attention_bias=_SCREAMING_SNAKE_CASE ,)
for d in range(_SCREAMING_SNAKE_CASE )
] )
if norm_in_type == "layer":
UpperCAmelCase_ : int = nn.LayerNorm(_SCREAMING_SNAKE_CASE )
elif norm_in_type is None:
UpperCAmelCase_ : Dict = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
UpperCAmelCase_ : List[str] = nn.LayerNorm(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = nn.Linear(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-1_00_00.0 )
causal_attention_mask.triu_(1 )
UpperCAmelCase_ : Optional[int] = causal_attention_mask[None, ...]
self.register_buffer('''causal_attention_mask''' ,_SCREAMING_SNAKE_CASE ,persistent=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = nn.Parameter(torch.zeros(1 ,_SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ : List[Any] = nn.Parameter(torch.zeros(1 ,_SCREAMING_SNAKE_CASE ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self ) -> str:
UpperCAmelCase_ : str = {}
def fn_recursive_add_processors(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
if hasattr(_SCREAMING_SNAKE_CASE ,'''set_processor''' ):
UpperCAmelCase_ : Union[str, Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
return processors
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Any:
UpperCAmelCase_ : str = len(self.attn_processors.keys() )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(_SCREAMING_SNAKE_CASE )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
if hasattr(_SCREAMING_SNAKE_CASE ,'''set_processor''' ):
if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
module.set_processor(_SCREAMING_SNAKE_CASE )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
for name, module in self.named_children():
fn_recursive_attn_processor(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[int]:
self.set_attn_processor(AttnProcessor() )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = True ,) -> Any:
UpperCAmelCase_ : List[str] = hidden_states.shape[0]
UpperCAmelCase_ : List[Any] = timestep
if not torch.is_tensor(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Optional[int] = torch.tensor([timesteps] ,dtype=torch.long ,device=hidden_states.device )
elif torch.is_tensor(_SCREAMING_SNAKE_CASE ) and len(timesteps.shape ) == 0:
UpperCAmelCase_ : Tuple = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCAmelCase_ : Any = timesteps * torch.ones(_SCREAMING_SNAKE_CASE ,dtype=timesteps.dtype ,device=timesteps.device )
UpperCAmelCase_ : Tuple = self.time_proj(_SCREAMING_SNAKE_CASE )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
UpperCAmelCase_ : Any = timesteps_projected.to(dtype=self.dtype )
UpperCAmelCase_ : Dict = self.time_embedding(_SCREAMING_SNAKE_CASE )
if self.embedding_proj_norm is not None:
UpperCAmelCase_ : List[str] = self.embedding_proj_norm(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = self.embedding_proj(_SCREAMING_SNAKE_CASE )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
UpperCAmelCase_ : Any = self.encoder_hidden_states_proj(_SCREAMING_SNAKE_CASE )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' )
UpperCAmelCase_ : str = self.proj_in(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = self.positional_embedding.to(hidden_states.dtype )
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : List[str] = 0
if encoder_hidden_states is not None:
additional_embeds.append(_SCREAMING_SNAKE_CASE )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
UpperCAmelCase_ : Tuple = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
UpperCAmelCase_ : Any = hidden_states[:, None, :]
UpperCAmelCase_ : List[Any] = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
UpperCAmelCase_ : Union[str, Any] = self.prd_embedding.to(hidden_states.dtype ).expand(_SCREAMING_SNAKE_CASE ,-1 ,-1 )
additional_embeds.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = torch.cat(
_SCREAMING_SNAKE_CASE ,dim=1 ,)
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
UpperCAmelCase_ : Union[str, Any] = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
UpperCAmelCase_ : List[Any] = F.pad(
_SCREAMING_SNAKE_CASE ,(
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) ,value=0.0 ,)
UpperCAmelCase_ : str = hidden_states + positional_embeddings
if attention_mask is not None:
UpperCAmelCase_ : List[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
UpperCAmelCase_ : Optional[int] = F.pad(_SCREAMING_SNAKE_CASE ,(0, self.additional_embeddings) ,value=0.0 )
UpperCAmelCase_ : str = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
UpperCAmelCase_ : int = attention_mask.repeat_interleave(self.config.num_attention_heads ,dim=0 )
if self.norm_in is not None:
UpperCAmelCase_ : Dict = self.norm_in(_SCREAMING_SNAKE_CASE )
for block in self.transformer_blocks:
UpperCAmelCase_ : Optional[int] = block(_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = self.norm_out(_SCREAMING_SNAKE_CASE )
if self.prd_embedding is not None:
UpperCAmelCase_ : Optional[int] = hidden_states[:, -1]
else:
UpperCAmelCase_ : str = hidden_states[:, additional_embeddings_len:]
UpperCAmelCase_ : Union[str, Any] = self.proj_to_clip_embeddings(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> List[str]:
UpperCAmelCase_ : Dict = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents | 30 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')

# Module-level logger; referenced throughout main() (the scrambled original
# bound it to `UpperCAmelCase`, leaving `logger` undefined).
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.

    Restored from the scrambled original: the class is instantiated as
    `ModelArguments` in main(), and the field names below are read there
    (e.g. `model_args.model_name_or_path`, `model_args.cache_dir`).
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Restored from the scrambled original: instantiated as
    `DataTrainingArguments` in main(), and the field names below are read
    there (e.g. `data_args.train_file`, `data_args.max_seq_length`).
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."} )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Validate file extensions as soon as the dataclass is built; must be
        # named __post_init__ for the dataclass machinery to call it.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice.

    Restored from the scrambled original: referenced as
    `DataCollatorForMultipleChoice(tokenizer=..., pad_to_multiple_of=...)`
    in main(), and `__call__` reads `self.tokenizer`, `self.padding`,
    `self.max_length` and `self.pad_to_multiple_of`.
    """

    # Transformers types are quoted so annotations never need to be evaluated.
    tokenizer: "PreTrainedTokenizerBase"
    padding: "Union[bool, str, PaddingStrategy]" = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        """Flatten choices, pad them together, then restore the choice axis."""
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )

        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
    """
    Fine-tune a multiple-choice model on SWAG (or user-supplied CSV/JSON files).

    Restored from the scrambled original: the function is invoked as `main()`
    at the bottom of the file, and the body referenced placeholder names
    (`_UpperCamelCase`, `__UpperCAmelCase`) that were never defined.
    """
    # See all possible arguments by passing --help, or in src/transformers/training_args.py.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}''' )
    logger.info(f'''Training/evaluation parameters {training_args}''')

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                "Use --overwrite_output_dir to overcome.")
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        raw_datasets = load_dataset(
            extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f'''ending{i}''' for i in range(4)]
    context_name = "sent1"
    question_header_name = "sent2"

    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`.")
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
                f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''')
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    # Preprocessing the datasets.
    def preprocess_function(examples):
        # Repeat the context once per ending so each (context, ending) pair is tokenized.
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(question_headers)
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False, )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )

    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # Entry point for xla_spawn (TPUs); `index` is the process index and is
    # required by the spawner's calling convention even though it is unused.
    main()


if __name__ == "__main__":
    main()
| 139 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class snake_case__(TaskTemplate):
    """
    Task template describing a text summarization dataset: a "text" input
    column mapped to a "summary" label column.

    Restored from the scrambled original: the base class must be the imported
    `TaskTemplate` (the decorator argument and base were both the undefined
    placeholder `lowercase_`), and the field names below are read by
    `column_mapping`.
    """

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the dataset's column names onto the schema's expected names."""
        return {self.text_column: "text", self.summary_column: "summary"}
import math

# Project Euler 493: an urn holds 7 colours with 10 balls per colour.
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """
    Return the expected number of distinct colours among `num_picked` balls
    drawn without replacement, formatted with 9 decimal places.

    Uses linearity of expectation: each colour is present unless all picked
    balls miss it, which happens with probability
    C(NUM_BALLS - BALLS_PER_COLOUR, num_picked) / C(NUM_BALLS, num_picked).

    (Restored from the scrambled original, where both constants were bound to
    the same placeholder name and the function name did not match its caller.)
    """
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
"""Dump information about the Python / ML-framework environment."""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


# Silence TensorFlow's C++ logging before it is imported below
# (restored from the scrambled original, which assigned '3' to a placeholder
# name and left `os` unused — presumably TF_CPP_MIN_LOG_LEVEL; confirm
# against upstream `utils/print_env.py`).
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print('Python version:', sys.version)

print('transformers version:', transformers.__version__)

try:
    import torch

    print('Torch version:', torch.__version__)
    print('Cuda available:', torch.cuda.is_available())
    print('Cuda version:', torch.version.cuda)
    print('CuDNN version:', torch.backends.cudnn.version())
    print('Number of GPUs available:', torch.cuda.device_count())
    print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
    print('Torch version:', None)

try:
    import deepspeed

    print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
    print('DeepSpeed version:', None)

try:
    import tensorflow as tf

    print('TensorFlow version:', tf.__version__)
    print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
    print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
    print('TensorFlow version:', None)
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    """
    Split a scikit-learn Bunch-style mapping into its feature matrix and
    target vector.

    (Restored name: main() calls `data_handling`, but the scrambled original
    defined three functions all named `lowerCAmelCase_`.)
    """
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    """
    Train an XGBoost regressor on (features, target) and return its
    predictions for `test_features` as a column vector of shape (n, 1).

    A fixed `random_state` keeps the run reproducible.
    (Restored name: main() calls `xgboost(...)`.)
    """
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def main() -> None:
    """
    Fetch the California-housing dataset, train/test split it, fit the
    XGBoost regressor and print MAE / MSE on the held-out quarter.

    (Restored name: the `__main__` guard calls `main()`.)
    """
    # Load California house price dataset
    housing = fetch_california_housing()

    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f'Mean Absolute Error : {mean_absolute_error(y_test, predictions)}')
    print(f'Mean Square Error : {mean_squared_error(y_test, predictions)}')


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
# Module-level logger; later code calls `logger.info(...)` (the scrambled
# original bound this to `UpperCamelCase_`, leaving `logger` undefined).
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    """
    Composite configuration holding one vision-encoder config and one
    text-decoder config, resolved through `AutoConfig.for_model`.

    Restored from the scrambled original: the base class must be the imported
    `PretrainedConfig` (it was the undefined `lowercase__`), and the popped
    sub-configs must be stored on `self.encoder` / `self.decoder`, which
    `to_dict` reads.
    """

    model_type = 'vision-encoder-decoder'
    is_composition = True

    def __init__(self, **kwargs):
        """Require both `encoder` and `decoder` sub-configs in `kwargs`."""
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f'''A configuraton of type {self.model_type} cannot be instantiated because '''
                f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''')

        encoder_config = kwargs.pop('encoder')
        encoder_model_type = encoder_config.pop('model_type')
        decoder_config = kwargs.pop('decoder')
        decoder_model_type = decoder_config.pop('model_type')

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        """Build a composite config, marking the decoder as a cross-attending decoder."""
        logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    """
    ONNX export configuration for the vision encoder half.

    Restored from the scrambled original: this class name is what
    `get_encoder_config` below instantiates, and the base must be the
    imported `OnnxConfig`.
    """

    # Minimum torch version needed for a correct ONNX export of this graph.
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self):
        """Dynamic-axis spec for the pixel input."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self):
        """Absolute tolerance used when validating the exported model."""
        return 1E-4

    @property
    def outputs(self):
        """Dynamic-axis spec for the encoder output."""
        return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}})
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    """
    ONNX export configuration for the text decoder half.

    Restored from the scrambled original: this class name is what
    `get_decoder_config` below instantiates, and the base must be the
    imported `OnnxConfig`.
    """

    @property
    def inputs(self):
        """Dynamic-axis spec for the decoder inputs."""
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs["attention_mask"] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs["encoder_hidden_states"] = {0: 'batch', 1: 'encoder_sequence'}

        return common_inputs

    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: "Optional[TensorType]" = None, ):
        """
        Build dummy decoder inputs by reusing the text dummies from the base
        class and adding zero-filled encoder hidden states whose width comes
        from `self._config.encoder_hidden_size`.
        """
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        batch, encoder_sequence = dummy_input['input_ids'].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop('input_ids')
        common_inputs["attention_mask"] = dummy_input.pop('attention_mask')
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    """
    ONNX export configuration for the full encoder-decoder model; delegates
    to the dedicated encoder / decoder configs defined above.
    """

    @property
    def inputs(self):
        # The composite model is exported as two separate graphs, so there is
        # no single input spec here.
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        """Return the ONNX config for the encoder half."""
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default") -> OnnxConfig:
        """
        Return the ONNX config for the decoder half, propagating the
        encoder's hidden size so the decoder knows the cross-attention width.
        """
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
| 88 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    """Builds a small random RobertaConfig plus dummy inputs for the Flax Roberta tests.

    Reconstructed from the mangled original, in which every __init__ parameter
    shared a single name (a SyntaxError), every assignment was rebound to the
    local `lowercase` instead of stored on `self`, and the three helper methods
    shadowed one another under one name while call sites used the real names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Random token ids/masks plus a small decoder-off RobertaConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Repackage the inputs as the dict shape expected by the common mixin tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Same config with `is_decoder=True` plus fake encoder outputs for cross-attention."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


# Backward-compatible alias for the name the mangled file used.
__SCREAMING_SNAKE_CASE = FlaxRobertaModelTester
@require_flax
class __SCREAMING_SNAKE_CASE ( FlaxModelTesterMixin , unittest.TestCase ):
    """Flax Roberta model test suite.

    Fixes vs. the mangled original: the two class attributes shared one name
    (the tuple shadowed the boolean flag), the mixin base had been replaced by
    the undefined name `lowercase__` (`FlaxModelTesterMixin` is imported at the
    top of this file section), `setUp` discarded the tester instead of storing
    it on `self`, and `from_pt` was passed an undefined name instead of True.
    """

    # NOTE(review): the first mangled attribute was a bare `True`; upstream
    # names it `test_head_masking` — confirm against the original test file.
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test loading the public roberta-base checkpoint from PyTorch weights."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 88 | 1 |
import d4rl  # noqa  # NOTE: the mangled file imported nonexistent `darl` (digit-mangling of d4rl)
import gym
import tqdm

from diffusers.experimental import ValueGuidedRLPipeline

# Sampling hyper-parameters (kept for reference; the pipeline defaults are used below).
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}

if __name__ == "__main__":
    # Fixes vs. the mangled original: every loop variable was rebound to one
    # name, so `env`, `obs`, `pipeline`, `denorm_actions`, etc. were undefined
    # at their point of use.
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())
            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
| 2 |
def lowerCAmelCase_ (lowerCAmelCase__: int ):
    """Return the `lowerCAmelCase__`-th Fibonacci number.

    Uses this file's convention: index 1 -> 0, index 2 -> 1, non-int input -> 0.

    Fixes vs. the original, which read the undefined name `n` (the parameter
    had been renamed) and called `isinstance(n, n)` — a TypeError, since the
    second argument must be a type.
    """
    n = lowerCAmelCase__
    if not isinstance(n, int) or n == 1:
        return 0
    if n == 2:
        return 1
    sequence = [0, 1]
    for i in range(2, n + 1):
        sequence.append(sequence[i - 1] + sequence[i - 2])
    return sequence[n]


# Readable alias matching the helper calls in the upstream file.
fibonacci = lowerCAmelCase_
def fibonacci_digits_index(lowerCAmelCase__: int ):
    """Return the index of the first Fibonacci number with `lowerCAmelCase__` digits.

    Walks the sequence once (one big-int addition per index) instead of
    rebuilding it from scratch for every candidate index.

    Fixes vs. the original, which read the undefined names `digits`, `index`
    and `fibonacci`, and passed the digit target — not the index — to the
    Fibonacci lookup.
    """
    index = 2
    digits = 0
    # Fib(3) and Fib(4) in the convention used by the Fibonacci helper above.
    fib_curr, fib_next = 2, 3
    while digits < lowerCAmelCase__:
        index += 1
        digits = len(str(fib_curr))  # fib_curr == Fib(index) at this point
        fib_curr, fib_next = fib_next, fib_curr + fib_next
    return index


def lowerCAmelCase_ (lowerCAmelCase__: int = 1_0_0_0 ):
    """Project Euler 25: index of the first Fibonacci number with `n` digits."""
    return fibonacci_digits_index(lowerCAmelCase__)


# Name used by the __main__ entry point below (lost in the mangling).
solution = lowerCAmelCase_

if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 556 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : Union[str, Any] =logging.get_logger(__name__)
_UpperCamelCase : List[str] ={"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class _SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    """Configuration for the original OpenAI GPT model.

    Fixes vs. the mangled original: the base class was the undefined name
    `UpperCamelCase` (`PretrainedConfig` is imported at the top of this file
    section), the two class attributes shared one name so the second shadowed
    the first, `__init__` declared sixteen parameters all named `_snake_case`
    (a SyntaxError), and its body read undefined names without ever storing
    anything on `self`.
    """

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        """Store the architecture hyper-parameters and forward extras to the base class."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 575 |
'''simple docstring'''
import functools
def lowerCamelCase_ ( word_a , word_b ):
    """Return the Levenshtein edit distance between `word_a` and `word_b`.

    Memoized top-down DP over the pair of indices (index_a, index_b).

    Fixes vs. the original, whose two parameters shared the name `A_`
    (a SyntaxError) and whose two precomputed lengths were collapsed into a
    single rebound local.
    """
    len_word_a = len(word_a)
    len_word_b = len(word_b)

    @functools.cache
    def min_distance(index_a , index_b ) -> int:
        # if first word is exhausted - delete all remaining of the second word
        if index_a >= len_word_a:
            return len_word_b - index_b
        # if second word is exhausted - delete all remaining of the first word
        if index_b >= len_word_b:
            return len_word_a - index_a
        diff = int(word_a[index_a] != word_b[index_b])  # 1 if current letters differ
        return min(
            1 + min_distance(index_a + 1, index_b),         # delete from word_a
            1 + min_distance(index_a, index_b + 1),         # insert into word_a
            diff + min_distance(index_a + 1, index_b + 1),  # substitute / keep
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 575 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCAmelCase : Dict = logging.get_logger(__name__)
lowerCAmelCase : str = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class UpperCamelCase__ ( PretrainedConfig ):
    """Configuration for LayoutLMv3 (text + 2-D layout + image embeddings).

    Fixes vs. the mangled original: the base class was the undefined name `_a`
    (`PretrainedConfig` is imported at the top of this file section), every
    `__init__` parameter was named `snake_case__` (a SyntaxError), the super()
    call forwarded that single name everywhere, and every attribute store was
    collapsed into one rebound local.
    """

    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_ad_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_ad_pos_bins=64,
        max_rel_ad_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        # "ad" in these names is this file's mangling of "2d" (layout coordinates).
        self.max_ad_position_embeddings = max_ad_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_ad_pos_bins = rel_ad_pos_bins
        self.max_rel_ad_pos = max_rel_ad_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class UpperCamelCase__ ( OnnxConfig ):
    """ONNX export config for LayoutLMv3.

    Fixes vs. the mangled original: the base class was the undefined name `_a`
    (`OnnxConfig` is imported at the top of this file section), all four members
    shared the name `a` (so only the last survived), the dummy-input generator
    declared every parameter as `snake_case__` (a SyntaxError), and its body
    read names that no longer existed.
    """

    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Input axes, ordered differently per task as the tokenizer/processor emits them."""
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """Generate processor-built dummy inputs (text, boxes, images) for ONNX export."""
        # OCR needs real document images; disable it for the synthetic inputs.
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
| 444 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_SCREAMING_SNAKE_CASE = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester(unittest.TestCase):
    """Shared fixture: hyper-parameters and helpers for the Pix2Struct image-processor tests.

    Named to match the call sites below (`PixaStructImageProcessingTester(self)`);
    the mangled original bound the class to a different name and declared every
    `__init__` parameter as `snake_case` (a SyntaxError), storing nothing on `self`.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        """Kwargs used to instantiate the image processor under test."""
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        """Download a fixed reference image (used for the expected-statistics test)."""
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


# Alias preserving the name the mangled file gave this class.
SCREAMING_SNAKE_CASE_ = PixaStructImageProcessingTester
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""",
)
@require_torch
@require_vision
class PixaStructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Pix2Struct image-processor tests (3-channel inputs).

    Fixes vs. the mangled original: the class shared its name with its sibling
    classes (only the last survived), the mixin base was the undefined name
    `_a` (`ImageProcessingSavingTestMixin` is imported at the top of this file
    section), locals were all rebound to `_lowercase`, and call arguments were
    the undefined name `snake_case`.
    """

    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # In VQA mode a header text is required, so calling without one must raise.
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""",
)
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Same suite against 4-channel (RGBA) inputs; RGB conversion drops one channel.

    Fixes vs. the mangled original: the class name collided with its siblings,
    the mixin base was the undefined name `_a`, and `setUp` discarded both the
    tester and the expected channel count into a dead local.
    """

    image_processing_class = PixaStructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PixaStructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input: the 4-channel input is converted to RGB,
        # hence `num_channels - 1` in the expected hidden dim.
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
| 181 | 0 |
def __lowerCAmelCase ( A_ : List[str] ) -> int:
__UpperCAmelCase = [0] * len(__A )
__UpperCAmelCase = []
__UpperCAmelCase = [1] * len(__A )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__A ) ):
if indegree[i] == 0:
queue.append(__A )
while queue:
__UpperCAmelCase = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
__UpperCAmelCase = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__A )
print(max(__A ) )
# Adjacency list of Graph
a_ = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 715 | import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup ( params , i , prefix ):
    """Return the relative-position-bias embedding of layer `i` (not transposed).

    Named to match the call sites in the conversion routine below; the mangled
    original declared three parameters all named `A_` (a SyntaxError).
    """
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def tax_attention_lookup ( params , i , prefix , layer_name="attention" ):
    """Return the (K, O, Q, V) projection matrices of attention layer `i`.

    Each stacked kernel read here is indexed `[:, i, :, :]` and the trailing
    (heads, head_dim) axes are flattened — on the output side for K/Q/V, on
    the input side for O.  Fixes vs. the mangled original, whose parameters
    shared one name and whose temporaries were rebound before their
    `.reshape` uses (NameError on `k_tmp` etc.).
    """
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def tax_mlp_lookup ( params , i , prefix , split_mlp_wi=False ):
    """Return the (wi, wo) MLP kernels of layer `i`.

    With a gated GeLU (v1.1-style checkpoints, `split_mlp_wi=True`) `wi` is a
    pair (wi_0, wi_1).  Fixes vs. the mangled original, whose four parameters
    shared the name `A_` (a SyntaxError).
    """
    if split_mlp_wi:
        wi_a = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_b = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_a, wi_b)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def tax_layer_norm_lookup ( params , i , prefix , layer_name ):
    """Return the layer-norm scale vector of layer `i`.

    Fixes vs. the mangled original, whose four parameters shared the name
    `A_` (a SyntaxError).
    """
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch ( variables , * , num_layers , is_encoder_only , scalable_attention=False ):
    """Map a flattened T5X `target` parameter tree onto HF T5/UMT5 state-dict keys.

    Fixes vs. the mangled original: the parameters shared one name (a
    SyntaxError) and every target state-dict key was collapsed into a rebound
    local, so no entry was ever written into the result.  Keys reconstructed
    from the upstream T5X conversion script.
    """
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        # Non-scalable checkpoints share one relative-attention bias per stack.
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict ( converted_params , is_encoder_only ):
    """Turn the numpy parameter dict into a torch state dict, filling tied weights.

    Fixes vs. the mangled original: both parameters were named `A_`
    (a SyntaxError) and every target key was collapsed into a rebound local.
    """
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_tax_weights_in_ta ( model , config , tax_checkpoint_path , is_encoder_only , scalable_attention ):
    """Load a T5X checkpoint, convert it, and copy the weights into `model`.

    Fixes vs. the mangled original: all five parameters shared the name `A_`
    (a SyntaxError), so every name in the body was undefined.
    """
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables,
        num_layers=config.num_layers,
        is_encoder_only=is_encoder_only,
        scalable_attention=scalable_attention,
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch (
    tax_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only=False , scalable_attention=False
):
    """End-to-end conversion: read the config and T5X checkpoint, save a PyTorch model.

    Fixes vs. the mangled original: the five parameters shared the name `A_`
    (a SyntaxError) and every local was rebound, leaving `config` and `model`
    undefined at use.
    """
    config = MTaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    # Fixes vs. the mangled original: `parser` and `args` were undefined
    # (everything was rebound to one name), and the final call read
    # `args.tax_checkpoint_path` although argparse exposes the
    # `--t5x_checkpoint_path` flag as `args.t5x_checkpoint_path`.
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
    # Required parameters
    parser.add_argument(
        """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
    )
    parser.add_argument(
        """--scalable_attention""",
        action="""store_true""",
        help="""Whether the model uses scaled attention (umt5 model)""",
        default=False,
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 286 | 0 |
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler | 46 |
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for `GPTSanJapaneseTokenizer`."""

    # Hooks consumed by TokenizerTesterMixin.
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        """Write a tiny vocabulary + emoji table into the temp dir the mixin created."""
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        """Build a tokenizer from the fixture files written in setUp."""
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        # 㔺 is normalized to 世 by the tokenizer's emoji/variant table.
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_tokens = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_tokens)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # <|bagoftoken|> expands to a repeated token on decode.
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        ids = tokenizer.encode(input_text)
        output_text = tokenizer.decode(ids)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input_token_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Prefix and input should round-trip to the same decoded text regardless
        # of how the prefix is supplied.
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        expected_text = "こんにちは、世界。こんばんは、世界。😀"
        ids_concat = tokenizer.encode(prefix_text + input_text)
        ids_prefix_only = tokenizer.encode("", prefix_text=prefix_text + input_text)
        ids_split = tokenizer.encode(input_text, prefix_text=prefix_text)
        self.assertEqual(tokenizer.decode(ids_concat), expected_text)
        self.assertEqual(tokenizer.decode(ids_prefix_only), expected_text)
        self.assertEqual(tokenizer.decode(ids_split), expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        # Subtract the two special tokens added by encode().
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_concat = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_prefix_only = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_split = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_concat = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_prefix_only = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_split = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_concat, expected_mask_concat)
        self.assertListEqual(type_ids_prefix_only, expected_mask_prefix_only)
        self.assertListEqual(type_ids_split, expected_mask_split)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        x_token_plain = tokenizer.encode("あンいワ")
        x_token_prefix_only = tokenizer.encode("", prefix_text="あンいワ")
        x_token_split = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_plain), tokenizer.decode(x_token_prefix_only))
        self.assertEqual(tokenizer.decode(x_token_plain), tokenizer.decode(x_token_split))
        self.assertNotEqual(x_token_plain, x_token_prefix_only)
        self.assertNotEqual(x_token_plain, x_token_split)
        self.assertEqual(x_token_prefix_only[1], x_token_prefix_only[-1])  # SEG token
        self.assertEqual(x_token_prefix_only[1], x_token_split[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_batched = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_batched.input_ids, expected_outputs)
        self.assertListEqual(x_token_batched.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_batched.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
| 63 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level logger; referenced as `logger` in save_vocabulary below.
logger = logging.get_logger(__name__)
# File names / download map / model max lengths consumed by the tokenizer class below.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class __magic_name__(PreTrainedTokenizer):
    """
    SentencePiece-based RemBERT tokenizer.

    Args:
        vocab_file (`str`): Path to the SentencePiece model file.
        do_lower_case / remove_space / keep_accents: normalization flags stored
            on the instance (normalization itself happens in SentencePiece).
        bos/eos/unk/sep/pad/cls/mask tokens: special tokens forwarded to the base class.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        # Single sequence: [CLS] X [SEP]; pair: [CLS] A [SEP] B [SEP]
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a_a + sep

    def get_special_tokens_mask(self, token_ids_a, token_ids_a_a=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_a_a is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]

        if token_ids_a_a is not None:
            return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_a_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        # 0 for the first segment (incl. [CLS]/[SEP]), 1 for the second segment.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        # Only copy if the destination differs from the source file.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 709 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
# Module-level logger for this modeling file.
logger = logging.get_logger(__name__)
# General docstring
lowerCamelCase__ = '''RegNetConfig'''
# Base docstring
lowerCamelCase__ = '''facebook/regnet-y-040'''
lowerCamelCase__ = [1, 10_88, 7, 7]
# Image classification docstring
lowerCamelCase__ = '''facebook/regnet-y-040'''
lowerCamelCase__ = '''tabby, tabby cat'''
lowerCamelCase__ = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    """Convolution + BatchNorm + optional activation: the basic RegNet building block."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,  # "same" padding for odd kernel sizes
            groups=groups,
            bias=False,  # no bias: BatchNorm follows
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    """Stem: a single stride-2 conv layer mapping pixel channels to the embedding size."""

    def __init__(self, config):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """1x1 conv + BatchNorm used on the residual path to match channels / downsample."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation channel attention (pool -> bottleneck MLP -> sigmoid gate)."""

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """ResNeXt-style bottleneck block: 1x1 -> grouped 3x3 -> 1x1, with residual shortcut."""

    def __init__(self, config, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            # no activation on the last conv; applied after adding the residual
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """RegNet-Y block: the X block with a Squeeze-and-Excitation layer inserted."""

    def __init__(self, config, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            # no activation on the last conv; applied after adding the residual
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACTaFN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """A RegNet stage: `depth` layers of the same width; the first layer downsamples."""

    def __init__(self, config, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    """Stack of RegNet stages; optionally collects per-stage hidden states."""

    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    """Base class handling weight init and the config/gradient-checkpointing plumbing."""

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        # He init for convs, constant init for norms (standard for ReLU CNNs).
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
# Docstrings injected by the @add_start_docstrings decorators on the model classes below.
REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values, output_hidden_states=None, return_dict=None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type once from label dtype / label count, then cache it on the config.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 226 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
# Repo root (three levels up from this test file); put utils/ on sys.path so
# `import check_copies` below resolves.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class _A(unittest.TestCase):
    """Tests for utils/check_copies.py ("# Copied from ..." consistency checker)."""

    def setUp(self):
        """Point check_copies at a temp copy of scheduling_ddpm.py."""
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """Write `class_code` under `comment` and assert the checker's verdict.

        When `overwrite_result` is given, the checker is run with overwrite=True and
        the file is expected to be rewritten to `overwrite_result`.
        """
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        # NOTE(review): original had black.TargetVersion.PYaa (nonexistent); PY37 per upstream — confirm.
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
import unittest
from knapsack import greedy_knapsack as kp
class _A(unittest.TestCase):
    """Unit tests for knapsack.greedy_knapsack.calc_profit."""

    def test_sorted(self):
        """calc_profit(profit, weight, max_weight) returns the maximum achievable profit."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # e.g. max_weight = -15 must be rejected
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        # a negative entry in the weight list must be rejected
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        # a negative entry in the profit list must be rejected
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        # max_weight = 0 must be rejected
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        # profit and weight lists of different lengths must be rejected
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same."
        )


if __name__ == "__main__":
    unittest.main()
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module logger (the class body below calls ``logger.warning``/``logger.error``).
logger = logging.get_logger(__name__)

# File names expected inside a pretrained checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

# Download location of the sentencepiece vocabulary for each checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

# Maximum input length for each checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}
class snake_case_(PreTrainedTokenizer):
    """Sentencepiece-based tokenizer for the GPT-SW3 models.

    This restores the identifiers that an earlier automated rewrite collapsed:
    every method was named ``__A`` (later defs shadowed earlier ones),
    parameter lists repeated a single placeholder name (a SyntaxError), and
    locals were bound to one placeholder while later lines read the original
    names. The base class is ``PreTrainedTokenizer`` (imported above).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        # NOTE(review): this set must contain the original Unicode space
        # variants (NBSP, en/em spaces, zero-width characters, ...) — verify
        # the exact codepoints against the upstream tokenizer source.
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8_203]))}]"
        )

    def __getstate__(self):
        # The sentencepiece processor is not picklable; drop it and reload in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Strip non-printing characters, normalise whitespace and apply NFC."""
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        return unicodedata.normalize("NFC", text)

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        """Preprocess and split *text* into sentencepiece string pieces."""
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an id (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Return the string unchanged: disables the base class's default clean-up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Join token pieces into text, keeping special tokens verbatim."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        """Full token -> id mapping, including tokens added after loading."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialise) the sentencepiece model file into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encode without special-token bookkeeping; optionally return a torch tensor."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)
        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decode ids straight through sentencepiece (no special-token handling)."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a ``Conversation`` into the ``User:``/``Bot:`` prompt format and encode it."""
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
| 707 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    """Write a README.md model card for one facebook/wmt19-{src}-{tgt} FSMT model.

    The previous revision named this ``__SCREAMING_SNAKE_CASE`` with three
    identically-named parameters (a SyntaxError) while the module bottom calls
    ``write_model_card(model_card_dir, src_lang=..., tgt_lang=...)``; the
    template below references ``texts``/``scores``/``pair`` directly.

    :param model_card_dir: directory the README.md is written into (created if missing)
    :param src_lang: source language code, e.g. "en"
    :param tgt_lang: target language code, e.g. "ru"
    """
    # Example sentences shown in the usage snippet of the card.
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'

    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    # e.g. "wmt19-ru-en" -> prefix "wmt19", source "ru", target "en"
    prefix, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 311 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    # No sentencepiece: the fast tokenizer still works, it simply has no
    # slow counterpart to convert from.
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1_024,
    "facebook/nllb-200-distilled-600M": 1_024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = [
    'ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi',
    'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn',
    'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva',
    'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn',
    'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt',
    'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn',
    'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn',
    'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn',
    'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn',
    'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn',
    'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn',
    'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn',
    'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn',
    'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr',
    'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn',
    'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn',
    'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh',
    'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn',
    'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl',
    'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn',
    'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn',
    'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn',
    'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn',
]
# fmt: on
class lowerCamelCase__(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) NLLB tokenizer.

    Restores the attribute/method names an earlier rewrite collapsed: all
    methods were ``__magic_name__`` (shadowing each other), parameter lists
    repeated one placeholder (SyntaxError), the pair parameters
    ``token_ids_0``/``token_ids_1`` were merged into ``token_ids_a``, and the
    ``self._tokenizer.post_processor`` / ``inputs["forced_bos_token_id"]``
    assignments were lost.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """Current source language code (e.g. "eng_Latn")."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add the language prefix/suffix special tokens around the sequence(s)."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """NLLB does not use token type ids; everything is segment 0."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs
    ) -> BatchEncoding:
        """Tokenize source (and optionally target) texts with the right language tokens."""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset prefix/suffix tokens and the post-processor for the source language."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            # legacy: no prefix, suffix = [eos, src_lang_code]
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            # current: prefix = [src_lang_code], suffix = [eos]
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset prefix/suffix tokens and the post-processor for the target language."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the slow sentencepiece vocabulary file, when one is available."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 566 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger (used by save_vocabulary below).
logger = logging.get_logger(__name__)

# Checkpoint file names and download locations for the RemBERT vocabulary.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
}

# Maximum input length of the pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}
class lowerCamelCase__(PreTrainedTokenizer):
    """Sentencepiece tokenizer for RemBERT.

    Restores the identifiers an earlier rewrite collapsed: all methods were
    ``__magic_name__`` (shadowing each other), parameter lists repeated one
    placeholder (SyntaxError), and locals like ``sep``/``cls``/``vocab`` were
    referenced but bound to a throwaway name. Base class is
    ``PreTrainedTokenizer`` (imported above).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        """Full token -> id mapping, including tokens added after loading."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The sentencepiece processor is not picklable; drop it and reload later.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text, sample=False):
        """Split text into sentencepiece pieces."""
        pieces = self.sp_model.EncodeAsPieces(text)
        return pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an id (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """1 marks a special token position, 0 a sequence token."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model file into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 566 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class A__ :
    """Directed graph with weighted transitions, used as a Markov chain.

    Restores the method names (``add_node``, ``add_transition_probability``,
    ``get_nodes``, ``transition``) that the module-level simulation function
    below calls; a previous rewrite collapsed them into one placeholder and
    never actually stored anything on ``self``.
    """

    def __init__(self) -> None:
        # Maps node -> {destination node -> transition probability}.
        self.connections = {}

    def add_node(self, node: str) -> None:
        """Register *node* with no outgoing transitions yet."""
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        """Add a transition node1 -> node2 with the given probability."""
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        """Return every registered node."""
        return list(self.connections)

    def transition(self, node: str) -> str:
        """Sample the next node from *node*'s outgoing distribution.

        Returns "" when the cumulative probability never exceeds the drawn
        value (e.g. the outgoing probabilities do not sum to 1).
        """
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def SCREAMING_SNAKE_CASE_(
    start: str, transitions: list[tuple[str, str, float]], number_of_steps: int
) -> dict[str, int]:
    """Simulate a random walk on the Markov chain described by *transitions*.

    Builds the graph (class ``A__`` above), then walks *number_of_steps* steps
    from *start*, counting visits. Note every node begins with a count of 1
    because the counter is seeded with ``Counter(graph.get_nodes())``.

    :param start: node the walk starts from
    :param transitions: (source, destination, probability) triples
    :param number_of_steps: how many transitions to take
    :return: Counter mapping node -> visit count
    """
    graph = A__()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(number_of_steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 506 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public names it provides.
# Restores the ``_import_structure`` identifier a previous rewrite collapsed
# into throwaway assignments while still passing ``_import_structure`` to
# ``_LazyModule`` at the bottom.
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports submodules
    # on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 506 | 1 |
"""simple docstring"""
def snake_case ( A__ ,A__ ):
print("\nThe shortest path matrix using Floyd Warshall algorithm\n" )
for i in range(A__ ):
for j in range(A__ ):
if dist[i][j] != float("inf" ):
print(int(dist[i][j] ) ,end="\t" )
else:
print("INF" ,end="\t" )
print()
def snake_case(graph, v):
    """All-pairs shortest paths via Floyd-Warshall.

    :param graph: v x v adjacency matrix; ``graph[i][j]`` is the edge weight
        from i to j, ``float("inf")`` when there is no edge.
    :param v: number of vertices.
    :returns: ``(dist, v)`` where ``dist[i][j]`` is the shortest distance
        from i to j. Also prints the matrix via ``_print_dist``.

    The original definition reused one name for both parameters (a
    SyntaxError); parameter names are restored from the call sites.
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v


# The __main__ driver below calls `floyd_warshall`; keep that name bound too.
floyd_warshall = snake_case
if __name__ == "__main__":
    # Interactive driver: read a weighted digraph and run Floyd-Warshall.
    # The mangled original bound every input to `lowerCamelCase_` while the
    # expressions below read `v`, `e`, `graph`, `src`, `dst` and `weight`;
    # the intended names are restored here.
    v = int(input('''Enter number of vertices: '''))
    e = int(input('''Enter number of edges: '''))
    # adjacency matrix: "no edge" is infinity, diagonal is zero
    graph = [[float('''inf''') for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('''\nEdge ''', i + 1)
        src = int(input('''Enter source:'''))
        dst = int(input('''Enter destination:'''))
        weight = float(input('''Enter weight:'''))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 95 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image

    from transformers.image_utils import load_image
else:
    class UpperCamelCase_ :
        """Stand-in used when vision dependencies (PIL) are unavailable."""

        @staticmethod
        def _SCREAMING_SNAKE_CASE ( *args : Any , **kwargs : Dict ) -> str:
            # The mangled original used the same name for *args and **kwargs,
            # which is a SyntaxError; distinct names restored.
            pass


def snake_case ( A__ ):
    """Image-hash stub used by the pipeline tests; always returns None."""
    return None


# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    '''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
# Backwards-compatible binding of the previous (mangled) constant name; the
# test class below reads INVOICE_URL, which was otherwise undefined.
lowerCamelCase_ = INVOICE_URL
# Pipeline tests for "document-question-answering": LayoutLMv2, LayoutLM,
# and Donut checkpoints are exercised against a pinned invoice image.
# NOTE(review): identifiers in this class were mechanically mangled — several
# signatures repeat `lowerCAmelCase_` as a parameter name, which is a
# SyntaxError in Python, and `require_detectrona` is presumably
# `require_detectron2`. Restore names from the upstream transformers test file
# before relying on this class.
@is_pipeline_test
@require_torch
@require_vision
class UpperCamelCase_ (unittest.TestCase ):
    # Model mapping used by the generic pipeline-test harness.
    __magic_name__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
    # Harness hook: builds a dqa pipeline plus example inputs (image URL,
    # pre-loaded image, and explicit word_boxes variants).
    @require_pytesseract
    @require_vision
    def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] ) -> Union[str, Any]:
        UpperCAmelCase_ : int = pipeline(
            "document-question-answering" , model=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
        UpperCAmelCase_ : int = INVOICE_URL
        # OCR the invoice once so one example can pass explicit word_boxes.
        UpperCAmelCase_ : Union[str, Any] = list(zip(*apply_tesseract(load_image(lowerCAmelCase_ ) , lowerCAmelCase_ , "" ) ) )
        UpperCAmelCase_ : Optional[Any] = "What is the placebo?"
        UpperCAmelCase_ : Tuple = [
            {
                "image": load_image(lowerCAmelCase_ ),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    # Harness hook: shape-check the pipeline output for each example.
    def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] ) -> int:
        UpperCAmelCase_ : Union[str, Any] = dqa_pipeline(lowerCAmelCase_ , top_k=2 )
        self.assertEqual(
            lowerCAmelCase_ , [
                [
                    {"score": ANY(lowerCAmelCase_ ), "answer": ANY(lowerCAmelCase_ ), "start": ANY(lowerCAmelCase_ ), "end": ANY(lowerCAmelCase_ )},
                    {"score": ANY(lowerCAmelCase_ ), "answer": ANY(lowerCAmelCase_ ), "start": ANY(lowerCAmelCase_ ), "end": ANY(lowerCAmelCase_ )},
                ]
            ]
            * 3 , )
    # Tiny random LayoutLMv2: smoke test, plus empty-OCR edge cases.
    @require_torch
    @require_detectrona
    @require_pytesseract
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
        UpperCAmelCase_ : Tuple = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
        UpperCAmelCase_ : Dict = INVOICE_URL
        UpperCAmelCase_ : int = "How many cats are there?"
        UpperCAmelCase_ : Any = [
            {"score": 0.0_0_0_1, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0_0_0_1, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        UpperCAmelCase_ : List[str] = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
        self.assertEqual(nested_simplify(lowerCAmelCase_ , decimals=4 ) , lowerCAmelCase_ )
        UpperCAmelCase_ : Dict = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(nested_simplify(lowerCAmelCase_ , decimals=4 ) , lowerCAmelCase_ )
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        UpperCAmelCase_ : int = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        UpperCAmelCase_ : Dict = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
        self.assertEqual(lowerCAmelCase_ , [] )
        # We can optionnally pass directly the words and bounding boxes
        UpperCAmelCase_ : int = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        UpperCAmelCase_ : Optional[Any] = []
        UpperCAmelCase_ : Dict = []
        UpperCAmelCase_ : Optional[Any] = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , words=lowerCAmelCase_ , boxes=lowerCAmelCase_ , top_k=2 )
        self.assertEqual(lowerCAmelCase_ , [] )
    # Fine-tuned LayoutLMv2 on DocVQA (pinned revision): exact-answer checks.
    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
        UpperCAmelCase_ : Dict = pipeline(
            "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
        UpperCAmelCase_ : Optional[Any] = INVOICE_URL
        UpperCAmelCase_ : Dict = "What is the invoice number?"
        UpperCAmelCase_ : int = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
                {"score": 0.9_9_4_4, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0_0_0_9, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        UpperCAmelCase_ : int = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
                {"score": 0.9_9_4_4, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0_0_0_9, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        UpperCAmelCase_ : Any = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
                [
                    {"score": 0.9_9_4_4, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0_0_0_9, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2 , )
    # Same checkpoint with a short max_seq_len to exercise chunking.
    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
        UpperCAmelCase_ : Tuple = pipeline(
            "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
        UpperCAmelCase_ : Tuple = INVOICE_URL
        UpperCAmelCase_ : Any = "What is the invoice number?"
        UpperCAmelCase_ : str = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
                {"score": 0.9_9_7_4, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9_9_4_8, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        UpperCAmelCase_ : Optional[int] = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
                {"score": 0.9_9_7_4, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9_9_4_8, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        UpperCAmelCase_ : str = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
                [
                    {"score": 0.9_9_7_4, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9_9_4_8, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2 , )
    # LayoutLM (v1) document-qa checkpoint; also checks the image=None +
    # word_boxes code path.
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
        UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowerCAmelCase_ )
        UpperCAmelCase_ : str = pipeline(
            "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowerCAmelCase_ , revision="3dc6de3" , )
        UpperCAmelCase_ : Any = INVOICE_URL
        UpperCAmelCase_ : List[str] = "What is the invoice number?"
        UpperCAmelCase_ : str = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
                {"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
            ] , )
        UpperCAmelCase_ : Dict = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
                {"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
            ] , )
        UpperCAmelCase_ : Union[str, Any] = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
                [
                    {"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2 , )
        UpperCAmelCase_ : Dict = list(zip(*apply_tesseract(load_image(lowerCAmelCase_ ) , lowerCAmelCase_ , "" ) ) )
        # This model should also work if `image` is set to None
        UpperCAmelCase_ : List[str] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
                {"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
            ] , )
    # Same LayoutLM checkpoint with a short max_seq_len to exercise chunking.
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
        UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowerCAmelCase_ )
        UpperCAmelCase_ : str = pipeline(
            "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowerCAmelCase_ , revision="3dc6de3" , max_seq_len=50 , )
        UpperCAmelCase_ : List[Any] = INVOICE_URL
        UpperCAmelCase_ : Optional[int] = "What is the invoice number?"
        UpperCAmelCase_ : int = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
                {"score": 0.9_9_9_9, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9_9_9_8, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        UpperCAmelCase_ : Tuple = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
                [
                    {"score": 0.9_9_9_9, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9_9_9_8, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2 , )
        UpperCAmelCase_ : Optional[int] = list(zip(*apply_tesseract(load_image(lowerCAmelCase_ ) , lowerCAmelCase_ , "" ) ) )
        # This model should also work if `image` is set to None
        UpperCAmelCase_ : Dict = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
                {"score": 0.9_9_9_9, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9_9_9_8, "answer": "us-001", "start": 16, "end": 16},
            ] , )
    # Donut (OCR-free, generative) checkpoint: answer-only output format.
    @slow
    @require_torch
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
        UpperCAmelCase_ : List[Any] = pipeline(
            "document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
        UpperCAmelCase_ : Optional[int] = INVOICE_URL
        UpperCAmelCase_ : int = "What is the invoice number?"
        UpperCAmelCase_ : List[str] = dqa_pipeline(image=lowerCAmelCase_ , question=lowerCAmelCase_ , top_k=2 )
        self.assertEqual(nested_simplify(lowerCAmelCase_ , decimals=4 ) , [{"answer": "us-001"}] )
    # No TF implementation of this pipeline exists.
    @require_tf
    @unittest.skip("Document question answering not implemented in TF" )
    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
        pass
| 95 | 1 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
# Backend-conditional imports: torch / torch_xla are only pulled in when the
# corresponding backend is actually installed.
if is_torch_available():
    import torch
if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm

_lowercase : Optional[Any] = logging.get_logger(__name__)
# The benchmark-arguments class below logs through the conventional name
# `logger`; the mangled assignment above only bound `_lowercase`, leaving
# `logger` undefined at call time.
logger = _lowercase
@dataclass
class SCREAMING_SNAKE_CASE_(BenchmarkArguments):
    """PyTorch-specific benchmark arguments.

    Extends ``BenchmarkArguments`` with torchscript tracing, TPU metric
    printing and an Apex AMP optimization level, plus device-selection
    helpers. Identifier mangling had left the base class, field names and
    property names unbound/duplicated; canonical names are restored so that
    ``self.deprecated_args``, ``self._setup_devices`` etc. resolve again.
    """

    # Legacy negated flags (``no_x``) still accepted; __init__ converts each
    # into its positive counterpart ``x``.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated ``no_*`` kwargs, then defer to BenchmarkArguments."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'
                    f' {positive_arg}={kwargs[positive_arg]}' )
        self.torchscript = kwargs.pop('torchscript', self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop('torch_xla_tpu_print_metrics', self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop('fp16_opt_level', self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        """Choose the torch device and GPU count once; result is cached."""
        requires_backends(self, ['torch'])
        logger.info('PyTorch: setting up devices')
        if not self.cuda:
            device = torch.device('cpu')
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ['torch'])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ['torch'])
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ['torch'])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 717 | import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def A__(*args: Tuple, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2) -> List[Any]:
    """Warn about deprecated arguments/attributes and return their values.

    Each positional item is an ``(attribute, version_name, message)`` triple.
    For every triple: if the attribute is present in ``take_from`` (a kwargs
    dict or an object), its value is popped/read and a FutureWarning is
    emitted; if the library version already reached ``version_name`` a
    ValueError is raised instead. Leftover keys in a ``take_from`` dict raise
    a TypeError naming the offending caller.

    The mangled original reused one parameter name four times (a SyntaxError)
    and referenced unbound names (``lowercase``) where ``tuple``,
    ``FutureWarning``, ``dict`` and ``stacklevel`` belong; the caller's
    filename had also been replaced by the literal "(unknown)".
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    # Allow a single bare triple: deprecate("attr", "1.0.0", "msg")
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                F'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
                F' version {__version__} is >= {version_name}' )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = F'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = F'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
        elif deprecated_kwargs is None:
            warning = F'`{attribute}` is deprecated and will be removed in version {version_name}.'

        if warning is not None:
            # standard_warn appends the caller-supplied message after the stock text.
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    # Any keys left in a dict `take_from` were never declared as deprecated:
    # surface them as a TypeError pointing at the calling frame.
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(F'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`')

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
| 661 | 0 |
lowerCAmelCase = 8.3_1_4_4_5_9_8
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if temperature < 0:
raise Exception('''Temperature cannot be less than 0 K''' )
if molar_mass <= 0:
raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example — the mangled original bound both inputs to `lowerCAmelCase`
    # while the call below reads `temperature` and `molar_mass`.
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 43 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# NOTE(review): both constants were mangled to the same name `A__` (second
# assignment shadows the first); upstream these are MAX_GPU_BATCH_SIZE = 16
# and EVAL_BATCH_SIZE = 32.
A__ : List[str] = 16
A__ : Dict = 32
def UpperCamelCase(accelerator: Accelerator, batch_size: int = 16):
    """Build train/eval DataLoaders for GLUE MRPC with a BERT tokenizer.

    :param accelerator: the Accelerate object driving distributed setup.
    :param batch_size: per-device batch size for both splits.
    :returns: ``(train_dataloader, eval_dataloader)``.

    The mangled original reused one parameter name (a SyntaxError) and
    referenced unbound names (`examples`, `datasets`); restored from usage.
    """
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    datasets = load_dataset('''glue''', '''mrpc''')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='''longest''', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='''pt''',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader


# training_function() below calls get_dataloaders(); keep that name bound.
get_dataloaders = UpperCamelCase
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # Rebind the dataloader factory so tests don't download GLUE; the mangled
    # original assigned the mock to a throwaway name, leaving the real
    # factory in use.
    get_dataloaders = mocked_dataloaders  # noqa: F811
def UpperCamelCase(config, args):
    """Train/evaluate BERT on MRPC with gradient accumulation + LocalSGD.

    :param config: dict with ``lr``, ``num_epochs``, ``seed``, ``batch_size``.
    :param args: parsed CLI namespace (mixed_precision, cpu,
        gradient_accumulation_steps, local_sgd_steps).

    The mangled original reused one parameter name (a SyntaxError) and left
    several literals as the unbound name ``__UpperCamelCase``; restored from
    the upstream accelerate LocalSGD example.
    """
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
        config['''num_epochs'''] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''')
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    metric = evaluate.load('''glue''', '''mrpc''')

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']))
            metric.add_batch(
                predictions=predictions, references=references,)
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""", eval_metric)


# main() below calls training_function(); keep that name bound.
training_function = UpperCamelCase
def UpperCamelCase():
    """Parse command-line arguments and launch training with default MRPC hyper-parameters."""
    parser = argparse.ArgumentParser(description='''Simple example of training script.''')
    parser.add_argument(
        '''--mixed_precision''',
        type=str,
        default=None,
        choices=['''no''', '''fp16''', '''bf16''', '''fp8'''],
        help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''',
    )
    # New Code #
    parser.add_argument(
        '''--gradient_accumulation_steps''',
        type=int,
        default=1,
        help='''The number of minibatches to be ran before gradients are accumulated.''',
    )
    parser.add_argument(
        '''--local_sgd_steps''', type=int, default=8, help='''Number of local SGD steps or None to disable local SGD''')
    parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''')
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config, args)


# The entry-point guard below calls main(); keep that conventional name bound.
main = UpperCamelCase

if __name__ == "__main__":
    main()
| 171 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
snake_case : Union[str, Any] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__UpperCamelCase )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : Optional[int] = "sshleifer/tiny-gpt2"
snake_case : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case : Optional[Any] = PyTorchBenchmark(__UpperCamelCase )
snake_case : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[str] = "sgugger/tiny-distilbert-classification"
snake_case : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , only_pretrain_model=__UpperCamelCase , )
snake_case : List[Any] = PyTorchBenchmark(__UpperCamelCase )
snake_case : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : Union[str, Any] = "sshleifer/tiny-gpt2"
snake_case : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , torchscript=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case : int = PyTorchBenchmark(__UpperCamelCase )
snake_case : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : int = "sshleifer/tiny-gpt2"
snake_case : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , fpaa=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case : List[Any] = PyTorchBenchmark(__UpperCamelCase )
snake_case : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
snake_case : int = "sshleifer/tiny-gpt2"
snake_case : Union[str, Any] = AutoConfig.from_pretrained(__UpperCamelCase )
# set architectures equal to `None`
snake_case : Union[str, Any] = None
snake_case : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case : Dict = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
snake_case : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : Dict = "sshleifer/tiny-gpt2"
snake_case : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
snake_case : Dict = PyTorchBenchmark(__UpperCamelCase )
snake_case : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == "cpu" , "Can\'t do half precision" )
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
snake_case : List[str] = "sshleifer/tiny-gpt2"
snake_case : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__UpperCamelCase , multi_process=__UpperCamelCase , )
snake_case : str = PyTorchBenchmark(__UpperCamelCase )
snake_case : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase ( self ) -> Dict:
    """Smoke-test inference benchmarking with an explicit config object.

    NOTE(review): `__UpperCamelCase`, `MODEL_ID`, `config` and `results` are
    mangled placeholder names in this copy -- confirm against the original.
    """
    snake_case : Tuple = "sshleifer/tiny-gpt2"
    snake_case : List[str] = AutoConfig.from_pretrained(__UpperCamelCase )
    snake_case : List[Any] = PyTorchBenchmarkArguments(
        models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
    snake_case : List[Any] = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
    snake_case : int = benchmark.run()
    self.check_results_dict_not_empty(results.time_inference_result )
    self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase ( self ) -> Optional[Any]:
    """Smoke-test inference benchmarking on a tiny BART checkpoint.

    NOTE(review): `__UpperCamelCase`, `MODEL_ID`, `config` and `results` are
    mangled placeholder names in this copy -- confirm against the original.
    """
    snake_case : Tuple = "sshleifer/tinier_bart"
    snake_case : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase )
    snake_case : List[Any] = PyTorchBenchmarkArguments(
        models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
    snake_case : Any = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
    snake_case : List[Any] = benchmark.run()
    self.check_results_dict_not_empty(results.time_inference_result )
    self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCamelCase ( self ) -> str:
    """Smoke-test training benchmarking with an explicit config object.

    NOTE(review): `__UpperCamelCase`, `MODEL_ID`, `config` and `results` are
    mangled placeholder names in this copy -- confirm against the original.
    """
    snake_case : List[Any] = "sshleifer/tiny-gpt2"
    snake_case : Any = AutoConfig.from_pretrained(__UpperCamelCase )
    snake_case : Tuple = PyTorchBenchmarkArguments(
        models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
    snake_case : Union[str, Any] = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
    snake_case : str = benchmark.run()
    self.check_results_dict_not_empty(results.time_train_result )
    self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase ( self ) -> int:
    """Smoke-test training benchmarking on a tiny BART checkpoint.

    NOTE(review): `__UpperCamelCase`, `MODEL_ID`, `config` and `results` are
    mangled placeholder names in this copy -- confirm against the original.
    """
    snake_case : Dict = "sshleifer/tinier_bart"
    snake_case : Tuple = AutoConfig.from_pretrained(__UpperCamelCase )
    snake_case : Any = PyTorchBenchmarkArguments(
        models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCamelCase , )
    snake_case : Union[str, Any] = PyTorchBenchmark(__UpperCamelCase , configs=[config] )
    snake_case : List[str] = benchmark.run()
    self.check_results_dict_not_empty(results.time_train_result )
    self.check_results_dict_not_empty(results.memory_train_result )
def lowerCamelCase ( self ) -> int:
    """Run the benchmark with CSV export enabled and assert all five CSV files
    (inference/train time & memory, plus env info) are written to the temp dir.

    NOTE(review): `__UpperCamelCase`, `MODEL_ID` and `tmp_dir` usage are
    mangled placeholder names in this copy -- confirm against the original.
    """
    snake_case : Tuple = "sshleifer/tiny-gpt2"
    with tempfile.TemporaryDirectory() as tmp_dir:
        snake_case : Any = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , save_to_csv=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__UpperCamelCase , "inf_time.csv" ) , train_memory_csv_file=os.path.join(__UpperCamelCase , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(__UpperCamelCase , "inf_mem.csv" ) , train_time_csv_file=os.path.join(__UpperCamelCase , "train_time.csv" ) , env_info_csv_file=os.path.join(__UpperCamelCase , "env.csv" ) , multi_process=__UpperCamelCase , )
        snake_case : Dict = PyTorchBenchmark(__UpperCamelCase )
        benchmark.run()
        self.assertTrue(Path(os.path.join(__UpperCamelCase , "inf_time.csv" ) ).exists() )
        self.assertTrue(Path(os.path.join(__UpperCamelCase , "train_time.csv" ) ).exists() )
        self.assertTrue(Path(os.path.join(__UpperCamelCase , "inf_mem.csv" ) ).exists() )
        self.assertTrue(Path(os.path.join(__UpperCamelCase , "train_mem.csv" ) ).exists() )
        self.assertTrue(Path(os.path.join(__UpperCamelCase , "env.csv" ) ).exists() )
def lowerCamelCase ( self ) -> List[Any]:
    """Run the benchmark with line-by-line memory tracing and a log file, then
    check both summaries are populated and the log file exists.

    NOTE(review): `__UpperCamelCase`, `UpperCamelCase__`, `MODEL_ID` and
    `result` are mangled placeholder names in this copy -- confirm against the
    original test file.
    """
    snake_case : Union[str, Any] = "sshleifer/tiny-gpt2"
    def _check_summary_is_not_empty(UpperCamelCase__ ):
        # Every memory summary must expose all four aggregate views.
        self.assertTrue(hasattr(__UpperCamelCase , "sequential" ) )
        self.assertTrue(hasattr(__UpperCamelCase , "cumulative" ) )
        self.assertTrue(hasattr(__UpperCamelCase , "current" ) )
        self.assertTrue(hasattr(__UpperCamelCase , "total" ) )
    with tempfile.TemporaryDirectory() as tmp_dir:
        snake_case : str = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=__UpperCamelCase , inference=__UpperCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__UpperCamelCase , "log.txt" ) , log_print=__UpperCamelCase , trace_memory_line_by_line=__UpperCamelCase , multi_process=__UpperCamelCase , )
        snake_case : str = PyTorchBenchmark(__UpperCamelCase )
        snake_case : List[Any] = benchmark.run()
        _check_summary_is_not_empty(result.inference_summary )
        _check_summary_is_not_empty(result.train_summary )
        self.assertTrue(Path(os.path.join(__UpperCamelCase , "log.txt" ) ).exists() )
| 707 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__snake_case = 16
__snake_case = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build train/eval DataLoaders for GLUE MRPC tokenized with bert-base-cased.

    Fixes vs. original: both parameters were named ``lowercase`` (a
    SyntaxError) and the loaded dataset was referenced under the undefined
    name ``datasets``; the function is renamed so the call in
    ``training_function`` resolves.

    Args:
        accelerator: the ``Accelerator`` whose distributed / mixed-precision
            settings drive the padding strategy.
        batch_size: per-device batch size for both loaders.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
# For testing only: in CI, replace the real dataloaders with light-weight mocks
# so the example runs without downloading GLUE.
# Fix vs. original: the mock was bound to `__snake_case` instead of shadowing
# `get_dataloaders`, so it was never used.
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train bert-base-cased on GLUE MRPC with gradient accumulation + LocalSGD.

    Fixes vs. original: both parameters were named ``lowercase`` (a
    SyntaxError) and every local was bound to ``snake_case``, leaving the
    later references (``args``, ``model``, ``metric`` …) undefined; renamed so
    ``main`` below can call it.

    Args:
        config: dict with "lr", "num_epochs", "seed" and "batch_size".
        args: parsed CLI namespace (cpu / mixed_precision /
            gradient_accumulation_steps / local_sgd_steps).
    """
    # For testing only: shorten the run when the mocked dataloaders are active.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    """Parse CLI flags and launch MRPC training with default hyper-parameters.

    Fixes vs. original: ``type=lowercase`` and the final
    ``training_function(lowercase, lowercase)`` referenced an undefined name;
    the parsed args / config dict were bound to throwaway names. Renamed so
    the ``main()`` call in the module guard resolves.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    # Script entry point.
    # NOTE(review): in this mangled copy `main` is defined as `__lowerCAmelCase`
    # above -- confirm the name against the original example.
    main()
| 117 | 0 |
import sys
# The 1000-digit number from Project Euler problem 8, stored as one string
# (20 rows of 50 digits, concatenated by adjacent-literal joining).
_a : str = (
    '73167176531330624919225119674426574742355349194934'
    '96983520312774506326239578318016984801869478851843'
    '85861560789112949495459501737958331952853208805511'
    '12540698747158523863050715693290963295227443043557'
    '66896648950445244523161731856403098711121722383113'
    '62229893423380308135336276614282806444486645238749'
    '30358907296290491560440772390713810515859307960866'
    '70172427121883998797908792274921901699720888093776'
    '65727333001053367881220235421809751254540594752243'
    '52584907711670556013604839586446706324415722155397'
    '53697817977846174064955149290862569321978468622482'
    '83972241375657056057490261407972968652414535100474'
    '82166370484403199890008895243450658541227588666881'
    '16427171479924442928230863465674813919123162824586'
    '17866458359124566529476545682848912883142607690042'
    '24219022671055626321111109370544217506941658960408'
    '07198403850962455444362981230987879927244284909188'
    '84580156166097919133875499200524063689912560717606'
    '05886116467109405077541002256983155200055935729725'
    '71636269561882670428252483600823257530420752963450'
)
def a_ ( __magic_name__ ) -> int:
    """Return the product of the decimal digits of the string ``__magic_name__``.

    An empty string yields 1 (the multiplicative identity).
    """
    result = 1
    for ch in __magic_name__:
        result = result * int(ch)
    return result
def a_ ( __magic_name__=None ) -> int:
    """Project Euler 8: greatest product of 13 adjacent digits in the input string.

    Fixes vs. original: the default argument referenced the undefined name
    ``N`` (the module constant here is ``_a``, now looked up lazily so import
    succeeds) and the body called the undefined helper ``str_eval`` (the digit
    product is now computed by a local helper).

    Args:
        __magic_name__: digit string to scan; defaults to the module's
            1000-digit constant ``_a``.

    Returns:
        The largest product found by the greedy sliding/jumping scan.
    """

    def digit_product(digits: str) -> int:
        # Product of the decimal digits of `digits`.
        prod = 1
        for d in digits:
            prod *= int(d)
        return prod

    n = _a if __magic_name__ is None else __magic_name__
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            # Next digit can't lower the window's product floor: slide by one.
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            # Window ends here: score it, then jump a full window ahead.
            largest_product = max(largest_product, digit_product(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this module (the function
    # above is named `a_`) -- confirm the name against the original script.
    print(f"{solution() = }")
| 598 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a : str = logging.get_logger(__name__)
class a_ ( SegformerImageProcessor ):
    """Deprecated alias kept for backward compatibility: forwards everything to
    ``SegformerImageProcessor`` after emitting a deprecation warning.

    Fixes vs. original: the base class was the undefined name ``a``, the
    ``*args``/``**kwargs`` parameters shared one name (a SyntaxError), and the
    warning passed a positional argument in place of a warning category.
    """

    def __init__(self, *args, **kwargs):
        # Standard HF deprecation pattern: warn once per construction, then
        # behave exactly like the replacement image processor.
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 598 | 1 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swap ``array[index1]`` and ``array[index2]`` in place if they violate the
    requested order (direction 1 = ascending, 0 = descending).

    Fixes vs. original: all four parameters were named ``lowerCamelCase__``
    (a SyntaxError) and both comparison operands indexed the same position
    (``array[indexa] > array[indexa]``); renamed to match its call sites in
    ``bitonic_merge`` and the module driver.
    """
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Merge the bitonic sequence ``array[low : low + length]`` in place so it
    becomes sorted in ``direction`` (1 = ascending, 0 = descending).
    ``length`` must be a power of two.

    Fixes vs. original: all four parameters were named ``lowerCamelCase__``
    (a SyntaxError); renamed to match the recursive call sites and the module
    driver.
    """
    if length > 1:
        middle = int(length / 2)
        # Compare-exchange each element with its partner half a window away.
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        # Recurse on both halves.
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort ``array[low : low + length]`` in place in the given ``direction``
    (1 = ascending, 0 = descending). ``length`` must be a power of two.

    Fixes vs. original: all four parameters were named ``lowerCamelCase__``
    (a SyntaxError); renamed to match the recursive call sites and the module
    driver.
    """
    if length > 1:
        middle = int(length / 2)
        # Build a bitonic sequence: first half ascending, second half descending...
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        # ...then merge it into a fully sorted run.
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    # Interactive driver: read comma-separated integers, sort ascending, then
    # merge descending.
    # NOTE(review): the input is bound to `__A` while the code below reads
    # `user_input` / `unsorted` -- mangled names, confirm against the original.
    __A =input('''Enter numbers separated by a comma:\n''').strip()
    __A =[int(item.strip()) for item in user_input.split(''',''')]
    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print('''\nSorted array in ascending order is: ''', end='''''')
    print(*unsorted, sep=''', ''')
    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print('''Sorted array in descending order is: ''', end='''''')
    print(*unsorted, sep=''', ''')
| 313 |
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form into [Fx, Fy] Cartesian components.

    Fixes vs. original: the three parameters shared the name
    ``lowerCamelCase__`` (a SyntaxError); renamed ``polar_force`` to match the
    module-level checks below.

    Args:
        magnitude: force magnitude.
        angle: direction of the force; degrees unless ``radian_mode`` is True.
        radian_mode: interpret ``angle`` as radians when True.
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def in_static_equilibrium(forces: NDArray[floataa], location: NDArray[floataa], eps: float = 10**-1) -> bool:
    """Check rotational static equilibrium of 2-D point forces.

    Computes each force's moment about the origin via the 2-D cross product
    and tests whether the net moment is within ``eps`` of zero.

    Fixes vs. original: the three parameters shared the name
    ``lowerCamelCase__`` (a SyntaxError); renamed to match the module-level
    checks below.
    """
    # cross of 2-D vectors yields the scalar z-component of each moment.
    moments: NDArray[floataa] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
    # Test to check if it works
    # NOTE(review): each pair below is bound to `__A` while
    # `in_static_equilibrium(forces, location)` reads distinct names --
    # mangled bindings, confirm against the original script.
    __A =array(
        [
            polar_force(718.4, 1_8_0 - 3_0),
            polar_force(879.54, 4_5),
            polar_force(1_0_0, -9_0),
        ]
    )
    __A =array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    __A =array(
        [
            polar_force(3_0 * 9.81, 1_5),
            polar_force(2_1_5, 1_8_0 - 4_5),
            polar_force(2_6_4, 9_0 - 3_0),
        ]
    )
    __A =array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    __A =array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
    __A =array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
    assert in_static_equilibrium(forces, location)
    import doctest

    doctest.testmod()
| 313 | 1 |
import os
def solution(A="matrix.txt") -> int:
    """Project Euler 81: minimal path sum moving only right/down through the
    comma-separated grid stored in file ``A``.

    Fixes vs. original: the DP table dimensions and every loop bound used
    ``range(A)`` -- ``A`` is the *filename* string, so this raised TypeError --
    instead of the grid size ``n``; renamed ``solution`` to match the module
    guard below.

    Args:
        A: file name; ``os.path.dirname(A)`` of a bare name is "", so the file
           is read relative to the current working directory.
    """
    with open(os.path.join(os.path.dirname(A), A)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])  # assumes a square n x n grid -- TODO confirm for non-square inputs
    dp = [[0 for _ in range(n)] for _ in range(n)]

    # Seed the first row/column (only one way to reach each of those cells).
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    # Each interior cell extends the cheaper of its top/left neighbors.
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]
if __name__ == "__main__":
    # Print the minimal path sum for the bundled matrix when run as a script.
    print(f"""{solution() = }""")
| 625 |
def cocktail_shaker_sort(A):
    """Sort the list ``A`` in place (and return it) with cocktail-shaker sort:
    alternating backward and forward bubble passes over a shrinking range.

    Fixes vs. original: the inner loops iterated ``range(A, 0, -1)`` and
    ``range(A)`` where ``A`` is the *list itself* (TypeError) instead of the
    shrinking bound ``i``; renamed to match the module driver below.
    """
    for i in range(len(A) - 1, 0, -1):
        swapped = False
        # Backward pass: bubble small elements toward the front.
        for j in range(i, 0, -1):
            if A[j] < A[j - 1]:
                A[j - 1], A[j] = A[j], A[j - 1]
                swapped = True
        # Forward pass: bubble large elements toward position i.
        for j in range(i):
            if A[j] > A[j + 1]:
                A[j + 1], A[j] = A[j], A[j + 1]
                swapped = True
        # A pass with no swaps means the list is already sorted.
        if not swapped:
            break
    return A
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the input is bound to `UpperCamelCase_` while the lines
    # below read `user_input` / `unsorted`, and `cocktail_shaker_sort` must be
    # the sort defined above -- mangled names, confirm against the original.
    UpperCamelCase_ = input('Enter numbers separated by a comma:\n').strip()
    UpperCamelCase_ = [int(item) for item in user_input.split(',')]
    print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 625 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCamelCase__ : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
def __A ( a_ : Union[List, PIL.Image.Image, torch.Tensor] )-> List[Any]:
'''simple docstring'''
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , a_ , )
if isinstance(a_ , torch.Tensor ):
return image
elif isinstance(a_ , PIL.Image.Image ):
SCREAMING_SNAKE_CASE : Union[str, Any] = [image]
if isinstance(image[0] , PIL.Image.Image ):
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = image[0].size
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
SCREAMING_SNAKE_CASE : str = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
SCREAMING_SNAKE_CASE : str = np.concatenate(a_ , axis=0 )
SCREAMING_SNAKE_CASE : List[str] = np.array(a_ ).astype(np.floataa ) / 255.0
SCREAMING_SNAKE_CASE : List[str] = image.transpose(0 , 3 , 1 , 2 )
SCREAMING_SNAKE_CASE : Optional[Any] = 2.0 * image - 1.0
SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(a_ )
elif isinstance(image[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Tuple = torch.cat(a_ , dim=0 )
return image
def __A ( a_ : Union[List, PIL.Image.Image, torch.Tensor] )-> Any:
'''simple docstring'''
if isinstance(a_ , torch.Tensor ):
return mask
elif isinstance(a_ , PIL.Image.Image ):
SCREAMING_SNAKE_CASE : Optional[int] = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = mask[0].size
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
SCREAMING_SNAKE_CASE : List[str] = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
SCREAMING_SNAKE_CASE : List[str] = np.concatenate(a_ , axis=0 )
SCREAMING_SNAKE_CASE : Any = mask.astype(np.floataa ) / 255.0
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : str = 1
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.from_numpy(a_ )
elif isinstance(mask[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE : Tuple = torch.cat(a_ , dim=0 )
return mask
class lowercase__(DiffusionPipeline):
    """RePaint inpainting pipeline: iteratively denoises random noise while
    repeatedly "undoing" steps so the known (unmasked) region steers the
    generated content (https://arxiv.org/abs/2201.09865).

    Fixes vs. original: the base class was the undefined name
    ``_UpperCAmelCase`` (the imports above provide ``DiffusionPipeline``) and
    the two class attributes were the literal ``42`` instead of module type
    annotations.
    """

    unet: UNetaDModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        # Registers the sub-modules so save/load and device placement work.
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 2_50,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Inpaint ``image`` where ``mask_image`` is 1.

        Args:
            image: source image (pixel values in [0, 1] / PIL).
            mask_image: binary mask; masked pixels are regenerated.
            num_inference_steps: denoising steps.
            eta: DDIM-style noise weight forwarded to the scheduler.
            jump_length / jump_n_sample: RePaint resampling schedule knobs.
            generator: RNG(s) for reproducibility (list length must equal batch).
            output_type: "pil" or raw numpy.
            return_dict: wrap the result in ``ImagePipelineOutput`` when True.
        """
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t (RePaint "jump back")
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        # Map [-1, 1] model output back to [0, 1] HWC numpy.
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 18 |
"""simple docstring"""
import math
def jump_search(arr: list, x: int) -> int:
    """Search the sorted list ``arr`` for ``x`` with jump search; return its
    index, or -1 if absent.

    Fixes vs. original: both parameters were named ``a_`` (a SyntaxError) and
    the block/step locals were mangled; renamed to match the call in the
    module driver below.
    """
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    # Jump ahead in blocks of ~sqrt(n) until the block's last element reaches x.
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    # Linear scan within the identified block.
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    # Interactive driver: read a sorted list and a target, then report the index.
    # NOTE(review): the inputs are bound to `lowerCamelCase__` while the code
    # below reads `user_input` / `arr` / `x` / `res` -- mangled names, confirm
    # against the original script.
    lowerCamelCase__ : Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
    lowerCamelCase__ : List[str] = [int(item) for item in user_input.split(",")]
    lowerCamelCase__ : Dict = int(input("Enter the number to be searched:\n"))
    lowerCamelCase__ : Tuple = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f'''Number {x} is at index {res}''')
| 18 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import machinery for `transformers.models.mvp`.
# Fixes vs. original: the optional-dependency branches *rebound* the structure
# dict (erasing previous entries) instead of adding keys, and the final
# `_LazyModule` call referenced the undefined name `_import_structure`.
_import_structure = {
    'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
    'tokenization_mvp': ['MvpTokenizer'],
}

# Fast tokenizer only when the `tokenizers` backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_mvp_fast'] = ['MvpTokenizerFast']

# Model classes only when PyTorch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mvp'] = [
        'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MvpForCausalLM',
        'MvpForConditionalGeneration',
        'MvpForQuestionAnswering',
        'MvpForSequenceClassification',
        'MvpModel',
        'MvpPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    # At runtime, replace this module with a lazy proxy that imports submodules
    # on first attribute access.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 688 |
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a, b, c):
    """Return both roots of ``a*x**2 + b*x + c = 0``.

    Real roots are returned as plain floats; complex roots stay complex
    (``cmath.sqrt`` never raises on a negative discriminant).

    Fixes vs. original: the three parameters shared the name
    ``UpperCamelCase_`` (a SyntaxError); renamed so the ``main`` demo below
    can call it by keyword.

    Raises:
        ValueError: if ``a`` is zero (the equation is not quadratic).
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main():
    """Demo entry point: solve 5x^2 + 6x + 1 = 0 and print both roots.

    Renamed from the mangled ``lowerCamelCase_`` (which collided with the
    roots function above) so the ``main()`` call in the module guard resolves.
    """
    solutiona, solutionb = quadratic_roots(a=5, b=6, c=1)
    print(f"""The solutions are: {solutiona} and {solutionb}""")
if __name__ == "__main__":
    # Run the demo when executed as a script.
    main()
| 471 | 0 |
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__a: Optional[int] = 2
class Dictionary:
    """A fairseq-style mapping from symbols to consecutive integer ids.

    Fixes vs. original: the class was named ``UpperCAmelCase`` while the
    conversion code below calls ``Dictionary.load``; ``__init__`` declared all
    five keyword-only parameters under one name (a SyntaxError); and the
    methods were all defined as ``_lowerCAmelCase`` although their internal
    call sites use ``add_symbol`` / ``_load_meta`` / ``add_from_file``.
    """

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []  # id -> symbol
        self.count = []  # id -> occurrence count
        self.indices = {}  # symbol -> id
        # Special tokens occupy the first ids, in this canonical order.
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        # Two dictionaries are equal when they map the same symbols to the same ids.
        return self.indices == other.indices

    def __getitem__(self, idx):
        # Out-of-range ids fall back to the unknown token.
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Build a Dictionary from ``f``, a path or file object of
        ``<symbol> <count>`` lines."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add ``word`` with count ``n`` and return its id; re-adding an
        existing word accumulates its count unless ``overwrite`` is set."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        # Plain dict files carry no metadata header: entries start at line 0.
        return 0

    def add_from_file(self, f):
        """Load entries from ``f`` (path or file object); each line is
        ``<symbol> <count>`` with an optional ``#fairseq:overwrite`` flag."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    """Rewrite fairseq BPE vocab keys into HF/GPT-2 style keys.

    (1) remove word breaking symbol, (2) add word ending symbol where the word
    is not broken up, e.g.:
    d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}

    Renamed from a placeholder: the conversion function below calls
    ``rewrite_dict_keys(src_dict.indices)``; the body's ``__snake_case``
    references were undefined.
    """
    da = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens (they must not carry the </w> marker)
    for k in keep_keys:
        del da[f"{k}</w>"]
        da[k] = d[k]  # restore
    return da
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq BioGPT checkpoint dir into a HF model directory.

    Expects ``checkpoint.pt``, ``dict.txt`` and ``bpecodes`` inside
    *biogpt_checkpoint_path*; writes vocab/merges/config/tokenizer-config and
    ``pytorch_model.bin`` into *pytorch_dump_folder_path*.

    Raises:
        ValueError: if the checkpoint dir or any required file is missing.

    Fixed: every local variable had been collapsed into the undefined
    placeholder ``__snake_case``; names restored so the function actually runs.
    The function is renamed to match the call in the ``__main__`` guard below.
    """
    # indentation used for the generated json files
    json_indent = 2

    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    # NOTE(review): literal filename used instead of transformers' TOKENIZER_CONFIG_FILE
    # constant, which may not be imported in this module — value is identical.
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, "tokenizer_config.json")
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, "pytorch_model.bin")
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    # CLI entry point: convert a fairseq BioGPT checkpoint into HF format.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 705 | '''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__a: Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the sentencepiece-based Pegasus tokenizer.

    Fixed: this class and the one below were identically named so the second
    shadowed the first; the mixin's expected class attributes and the
    ``test_*`` method names had been replaced with duplicated placeholders,
    and the class referenced the undefined name ``a__`` as a base.
    """

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(__a)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the BigBird-Pegasus variant of the tokenizer
    (offset 0, no sentence mask token, ``[MASK]`` as mask token).

    Renamed: the class previously shared its name with the test class above,
    shadowing it so its tests never ran; the mixin attributes and test method
    names had also been replaced with duplicated placeholders.
    """

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(__a, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        raw_input_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(raw_input_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 428 | 0 |
"""simple docstring"""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: maps submodule name -> public names it exports.
# Fixed: every assignment below went to a throwaway name (``A_``) and the
# final _LazyModule was never installed in sys.modules, so importing this
# package raised NameError on ``_import_structure``.
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends (torch/tf/flax)
    # are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 391 |
"""simple docstring"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a 1-D (row) numpy array into a column vector of shape (n, 1).

    Renamed from a placeholder: the covariance helpers below call
    ``column_reshape``; the body referenced ``input_array`` while the
    parameter was named ``A``.
    """
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the within-class covariance matrix of *features*.

    ``features`` is (n_features, n_samples); ``labels[i]`` in ``range(classes)``
    assigns sample column *i* to a class.  Fixed: the signature declared three
    parameters all named ``A`` (a SyntaxError) and the body mixed ``A`` with
    the real names.
    """
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the between-class covariance matrix of *features*.

    Each class contributes its sample count times the outer product of
    (class mean - overall mean).  Fixed: duplicated ``A`` parameter names and
    placeholder references restored to the real variables.
    """
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project *features* (n_features, n_samples) onto its top *dimensions*
    principal components and return the projected data.

    Raises:
        AssertionError: if the dataset is empty.
    """
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project *features* onto *dimensions* discriminant directions computed
    from the generalized eigenproblem between/within class covariance.

    Raises:
        AssertionError: if ``dimensions >= classes`` or the dataset is empty.
    """
    assert classes > dimensions

    # Check if features have been already loaded.
    # Fixed: the original tested the truthiness of the *method object*
    # ``features.any`` (always True); the call was missing.
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    """LDA must raise AssertionError when dimensions >= classes."""
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    """PCA output for a small fixed dataset must match the expected projection."""
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 391 | 1 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    """hf_hub_url must URL-quote the path and default the revision to 'main'.

    Fixed: the signature declared three parameters all named ``lowerCAmelCase_``
    (a SyntaxError) and the function lacked the ``test_`` name pytest collects.
    """
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
| 153 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    """Well-formed 2-column CSV fixture; returns the file path as a string."""
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    """CSV fixture whose last row has an extra trailing column (malformed)."""
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    """CSV fixture with a single ``image`` column pointing at *image_file*."""
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    """CSV fixture with a single ``label`` column of class-label strings."""
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    """CSV fixture with an ``int_list`` column of space-separated integers."""
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    """A malformed CSV must raise a ValueError and log which file failed."""
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    """An ``image`` column declared via Features is cast to the Image type."""
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    """A ``label`` column declared via Features is cast to ClassLabel ints.

    Fixed: the original called the non-existent ``ClassLabel.straint`` —
    the method is ``str2int`` — and passed a placeholder instead of the loop
    variable ``label``.
    """
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    """A ``converters`` entry turns a space-separated column into int lists.

    Fixed: the converter lambda referenced undefined names (``x`` was never
    bound, ``int`` was applied to the whole cell instead of each token).
    """
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 153 | 1 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
a_ = get_tests_dir('fixtures/dummy-config.json')
class _UpperCamelCase(unittest.TestCase):
    """Tests for AutoConfig: shortcut/local/hub loading, model-type pattern
    matching, custom config registration, and remote (dynamic) configs.

    Fixed: every method was named ``__UpperCamelCase`` so they clobbered each
    other and unittest discovered no tests; the ambiguous ``__A`` references
    are restored from the imports this module actually makes (BertConfig,
    RobertaConfig, CustomConfig, DUMMY_UNKNOWN_IDENTIFIER, the ``a_``
    fixture path).
    """

    def setUp(self):
        # Never block waiting for user confirmation when tests touch remote code.
        # NOTE(review): target attribute reconstructed from the upstream test
        # file (the original line only preserved ``= 0``) — confirm.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(a_)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not trusted, loading a dynamic config must fail.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_new_config_register_conflicts_with_hub(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger; renamed from the mangled placeholder so it is not
# clobbered by the archive-map assignment below.
logger = logging.get_logger(__name__)
# Map of canonical checkpoint names to their hosted config files.
UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    """
    Configuration class for a UniSpeech model.

    The defaults yield a configuration similar to the
    `microsoft/unispeech-large-1500h-cv` architecture. See `PretrainedConfig`
    for the attributes common to all configurations.
    """

    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        """Total downsampling factor of the convolutional feature extractor."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 17 | 0 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
# Module-level logger; renamed so it is not overwritten by the archive map below.
logger = logging.get_logger(__name__)
# Map of canonical GPT-J checkpoint names to their hosted config files.
GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    """
    Configuration class for a GPT-J model.

    The defaults yield a configuration similar to the `EleutherAI/gpt-j-6B`
    architecture. See `PretrainedConfig` for the common attributes.
    """

    model_type = "gptj"
    # Expose the standard config attribute names as aliases of the GPT-2-style ones.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for GPT-J, with optional past-key-values support."""

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported model's inputs."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Bypass OnnxConfigWithPast's override so the base implementation builds the text inputs.
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask to cover the synthetic past positions.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 102 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    """Compute WER/CER for `result` and write metrics (and optionally all outputs) to text files."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Lower-case `text`, strip training-time ignored punctuation, and collapse whitespace."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    """Run ASR inference over the requested dataset split and log WER/CER results."""
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
lowerCamelCase = parser.parse_args()
main(args)
| 102 | 1 |
"""simple docstring"""
import requests
UpperCamelCase__ = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='
def fetch_bbc_news(bbc_news_api_key: str) -> None:
    """Print the titles of the current top BBC News articles, numbered from 1."""
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page['articles'], 1):
        print(f'''{i}.) {article['title']}''')
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
| 110 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure for the TAPAS sub-package: the heavy modeling modules
# are only imported when first accessed.
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module in place of this one so attribute access
    # triggers the deferred imports above.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(snake_case__, unittest.TestCase):
    """Tokenizer tests for the OpenAI GPT BPE tokenizer (slow and fast)."""

    # NOTE(review): the mixin base `snake_case__` is presumably TokenizerTesterMixin — confirm import.
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests: padding without a pad token must raise.
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token, so the generic mixin test does not apply
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Same as the above, but with `spacy` on top of `ftfy` for the tokenization."""

    pass
| 683 |
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """
    Solve the mass-action law n * p = n_i**2 for the one unknown carrier
    concentration of a semiconductor.

    Exactly one of the three arguments must be 0 (the unknown); the result is
    a tuple of (name_of_unknown, computed_value).

    >>> carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
    ('intrinsic_conc', 50.0)
    >>> carrier_concentration(electron_conc=0, hole_conc=1600, intrinsic_conc=200)
    ('electron_conc', 25.0)
    >>> carrier_concentration(electron_conc=1000, hole_conc=0, intrinsic_conc=1200)
    ('hole_conc', 1440.0)
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError(
            "Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        # Unreachable: the checks above cover every valid combination.
        return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 | 1 |
from scipy.stats import spearmanr
import datasets
_lowercase = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_lowercase = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_lowercase = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    """Spearman rank-order correlation metric backed by scipy.stats.spearmanr."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 659 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# Module-level logger; save_vocabulary below calls logger.error/logger.warning.
logger = logging.get_logger(__name__)
# File-name constants referenced by the tokenizer class below; all four were
# previously bound to the same placeholder name, clobbering one another.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from every byte value (0-255) to a unicode character.

    Printable bytes map to themselves; the remaining bytes are shifted to
    chr(256 + n) so that no byte maps to a whitespace/control character the
    BPE code chokes on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in `word`.

    `word` is a sequence of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : int ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : Any ,lowerCAmelCase__ : Tuple="replace" ,lowerCAmelCase__ : Optional[int]="<s>" ,lowerCAmelCase__ : Optional[int]="</s>" ,lowerCAmelCase__ : Tuple="</s>" ,lowerCAmelCase__ : int="<s>" ,lowerCAmelCase__ : Union[str, Any]="<unk>" ,lowerCAmelCase__ : str="<pad>" ,lowerCAmelCase__ : Tuple="<mask>" ,lowerCAmelCase__ : Optional[int]=False ,**lowerCAmelCase__ : Tuple ,) -> Any:
'''simple docstring'''
lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else bos_token
lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else eos_token
lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else sep_token
lowerCAmelCase_ : Any = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else cls_token
lowerCAmelCase_ : Tuple = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else unk_token
lowerCAmelCase_ : Any = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase_ : Optional[int] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
with open(lowerCAmelCase__ ,encoding="utf-8" ) as vocab_handle:
lowerCAmelCase_ : List[str] = json.load(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()}
lowerCAmelCase_ : Optional[int] = errors # how to handle errors in decoding
lowerCAmelCase_ : Optional[int] = bytes_to_unicode()
lowerCAmelCase_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ ,encoding="utf-8" ) as merges_handle:
lowerCAmelCase_ : List[str] = merges_handle.read().split("\n" )[1:-1]
lowerCAmelCase_ : List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase_ : Union[str, Any] = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : Dict = {}
lowerCAmelCase_ : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase_ : Any = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def UpperCAmelCase_ ( self : Dict ) -> Dict:
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase_ ( self : Dict ) -> str:
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Dict ) -> Dict:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCAmelCase_ : Union[str, Any] = tuple(lowerCAmelCase__ )
lowerCAmelCase_ : str = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
lowerCAmelCase_ : Optional[int] = min(lowerCAmelCase__ ,key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ ,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = bigram
lowerCAmelCase_ : Tuple = []
lowerCAmelCase_ : str = 0
while i < len(lowerCAmelCase__ ):
try:
lowerCAmelCase_ : Union[str, Any] = word.index(lowerCAmelCase__ ,lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase_ : List[str] = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase_ : Optional[int] = tuple(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
lowerCAmelCase_ : Dict = get_pairs(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = " ".join(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = word
return word
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : Any = []
for token in re.findall(self.pat ,lowerCAmelCase__ ):
lowerCAmelCase_ : Optional[int] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(" " ) )
return bpe_tokens
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase__ ,self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : List[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : int = "".join(lowerCAmelCase__ )
lowerCAmelCase_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" ,errors=self.errors )
return text
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase_ : Optional[int] = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : List[str] = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCAmelCase__ ,ensure_ascii=lowerCAmelCase__ ) + "\n" )
lowerCAmelCase_ : Dict = 0
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCAmelCase__ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
lowerCAmelCase_ : List[Any] = token_index
writer.write(" ".join(lowerCAmelCase__ ) + "\n" )
index += 1
return vocab_file, merge_file
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
    """
    Build model inputs by adding special tokens: `<cls> X <sep>` for a single
    sequence, `<cls> A <sep><sep> B <sep>` for a pair.
    """
    if token_ids_1 is None:
        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    cls = [self.cls_token_id]
    sep = [self.sep_token_id]
    return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask(
    self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
    """
    Return a mask with 1 for special tokens and 0 for sequence tokens, matching
    the layout produced by `build_inputs_with_special_tokens`.
    """
    if already_has_special_tokens:
        # Token list already contains special tokens: delegate to the base class.
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
        )
    if token_ids_1 is None:
        return [1] + ([0] * len(token_ids_0)) + [1]
    return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(
    self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
    """
    Create token-type IDs for the given sequence(s). This model does not use
    token types, so the mask is all zeros, sized to the full special-token layout.
    """
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
    """
    Optionally prefix `text` with a space so byte-level BPE treats the first word
    like any other word; returns `(text, remaining_kwargs)`.
    """
    add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
    if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
        text = " " + text
    return (text, kwargs)
def _pad(
    self,
    encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
    max_length: Optional[int] = None,
    padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
    pad_to_multiple_of: Optional[int] = None,
    return_attention_mask: Optional[bool] = None,
) -> dict:
    """
    Pad `encoded_inputs` via the base implementation, then pad the model-specific
    `global_attention_mask` to the same length as the other inputs.
    """
    encoded_inputs = super()._pad(
        encoded_inputs=encoded_inputs,
        max_length=max_length,
        padding_strategy=padding_strategy,
        pad_to_multiple_of=pad_to_multiple_of,
        return_attention_mask=return_attention_mask,
    )
    # Load from model defaults
    if return_attention_mask is None:
        return_attention_mask = "attention_mask" in self.model_input_names
    if return_attention_mask and "global_attention_mask" in encoded_inputs:
        required_input = encoded_inputs[self.model_input_names[0]]
        # `global_attention_mask` need to have the same length as other (sequential) inputs.
        needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
        if needs_to_be_padded:
            difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
            if self.padding_side == "right":
                # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                encoded_inputs["global_attention_mask"] = (
                    encoded_inputs["global_attention_mask"] + [-1] * difference
                )
            elif self.padding_side == "left":
                encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                    "global_attention_mask"
                ]
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))
    return encoded_inputs
| 659 | 1 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
# Substring renames applied to CLAP checkpoint keys before loading into the HF model.
# NOTE: must be named KEYS_TO_MODIFY_MAPPING — rename_state_dict() below references it by this name.
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}
# NOTE(review): module-level download of the CLAP feature extractor; the result is not
# referenced in the visible code — presumably kept for interactive use. Confirm before removing.
lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def init_clap(checkpoint_path, enable_fusion=False):
    """Build the original (laion) CLAP model from a checkpoint and return (model, model_cfg)."""
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def rename_state_dict(state_dict):
    """
    Translate a laion-CLAP state dict into transformers' ClapModel naming, splitting
    fused audio qkv weights into separate query/key/value tensors.
    """
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list; every third sequential module is a linear layer
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
        # Fixed from `"audio" and "qkv" in key` (the literal "audio" is always truthy);
        # fused qkv tensors only occur in the audio (Swin) branch, so behavior is unchanged.
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """Convert a laion-CLAP checkpoint to a transformers ClapModel and save it with its config."""
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    # NOTE(review): restored from the upstream conversion script — confirm the target attribute.
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: fixed the obfuscated version which assigned the parser/args to a
    # placeholder name and then referenced the undefined `parser`/`args`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 635 | """simple docstring"""
import argparse
from collections import defaultdict
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int:
_SCREAMING_SNAKE_CASE : str = F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(__SCREAMING_SNAKE_CASE , """r""" ) as f:
_SCREAMING_SNAKE_CASE : Union[str, Any] = f.readlines()
_SCREAMING_SNAKE_CASE : Optional[Any] = F"""class {class_name}("""
_SCREAMING_SNAKE_CASE : List[Any] = F"""{4 * " "}def {test_name}("""
_SCREAMING_SNAKE_CASE : Tuple = F"""{8 * " "}{correct_line.split()[0]}"""
_SCREAMING_SNAKE_CASE : List[Any] = F"""{16 * " "}{correct_line.split()[0]}"""
_SCREAMING_SNAKE_CASE : List[str] = False
_SCREAMING_SNAKE_CASE : Tuple = False
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
_SCREAMING_SNAKE_CASE : Optional[int] = False
_SCREAMING_SNAKE_CASE : Any = 0
_SCREAMING_SNAKE_CASE : Optional[Any] = 0
_SCREAMING_SNAKE_CASE : Dict = []
for line in lines:
if line.startswith(__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Any = True
elif in_class and line.startswith(__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : str = True
elif in_class and in_func and (line.startswith(__SCREAMING_SNAKE_CASE ) or line.startswith(__SCREAMING_SNAKE_CASE )):
_SCREAMING_SNAKE_CASE : Dict = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
_SCREAMING_SNAKE_CASE : int = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
_SCREAMING_SNAKE_CASE : Any = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * " "}{correct_line}""" )
_SCREAMING_SNAKE_CASE : Optional[int] = False
else:
new_lines.append(__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , """w""" ) as f:
for line in new_lines:
f.write(__SCREAMING_SNAKE_CASE )
def main(correct, fail=None):
    """
    Apply every correction listed in `correct` (semicolon-separated
    `file;class;test;correct_line` rows). If `fail` is given, only apply
    corrections whose `file::class::test` id appears in that failures file.
    """
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, "r") as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    # CLI entry point: fixed the obfuscated version which assigned the parser/args to a
    # placeholder name and then referenced the undefined `parser`/`args`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()
    main(args.correct_filename, args.fail_filename)
| 635 | 1 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    """
    Build a circuit with `qubits` quantum and `classical_bits` classical bits,
    measure qubit 0 into classical bit 0 on the Aer simulator (1000 shots),
    and return the resulting counts histogram.
    """
    # Name restored to `single_qubit_measure` — the __main__ guard below calls it by this name.
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
# Demo entry point: measures a 1-qubit/1-bit circuit and prints the shot counts.
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 635 | import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """
    Implements the hyperbolic tangent activation function element-wise,
    using the identity tanh(x) = 2 / (1 + e^(-2x)) - 1.

    >>> tangent_hyperbolic(np.array([0.0]))
    array([0.])
    """
    # Parameter renamed to `vector` — the body referenced it by that name while the
    # obfuscated signature used a placeholder, which raised NameError at call time.
    return (2 / (1 + np.exp(-2 * vector))) - 1
# Runs the doctests embedded in this module's docstrings.
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): names restored from HF conventions — the obfuscated source assigned both the
# logger and this map to the same placeholder, so the logger was clobbered by the dict.
VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
    "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
    "uclanlp/visualbert-vqa-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
    "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
    "uclanlp/visualbert-vcr-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
    ),
    "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
    "uclanlp/visualbert-nlvr2-coco-pre": (
        "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
    ),
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    """
    Configuration for a VisualBERT model. Restored from the obfuscated source: the
    original __init__ had every parameter renamed to one placeholder (a SyntaxError),
    while the body's attribute assignments pin down the real parameter set.
    """

    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 707 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)
# Config name used in generated docstrings (HF convention).
_CONFIG_FOR_DOC = "T5Config"
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """
    Shift input ids one position to the right, prepend `decoder_start_token_id`,
    and replace any -100 label-ignore markers with `pad_token_id`.
    """
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
class FlaxMTaModel(FlaxTaModel):
    """Flax mT5 model: identical to the T5 base class but configured for mT5 checkpoints."""

    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    """Flax mT5 encoder-only model built on the T5 encoder base class."""

    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    """Flax mT5 seq2seq LM head model built on the T5 conditional-generation base class."""

    model_type = "mt5"
    config_class = MTaConfig
| 489 | 0 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
# Must be named `logger` — the functions below log through it.
logger = logging.get_logger(__name__)
# fairseq-to-transformers key renames; referenced by name in recursively_load_weights().
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "encoder.layer_norm_for_extract": "layer_norm_for_extract",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "label_embs_concat": "label_embeddings_concat",
    "mask_emb": "masked_spec_embed",
    "spk_proj": "speaker_proj",
}
# Keys that are NOT nested under the `unispeech_sat.` prefix in the HF model.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "label_embeddings_concat",
    "speaker_proj",
    "layer_norm_for_extract",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """
    Walk `hf_pointer` along the dotted `key`, validate the shape, and copy `value`
    into the attribute selected by `weight_type` ("weight", "weight_g", "weight_v",
    "bias", or None for the module's own data).
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    """
    Copy all weights from a fairseq UniSpeechSat model into the HF model, using
    MAPPING for key translation; conv feature-extractor layers are handled by
    load_conv_layer(). Unmatched weights are collected and logged.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        # Substitute the encoder layer index extracted from the fairseq key.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """
    Copy a single fairseq conv feature-extractor tensor (conv weight/bias or
    layer-norm weight/bias) into the HF feature extractor, validating shapes.
    Tensors that do not match a handled (type_id, name) pattern go to `unused_weights`.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Convert a fairseq UniSpeechSat checkpoint to a transformers model
    (ForCTC when fine-tuned, ForPreTraining otherwise) and save it.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    # NOTE(review): the original script overwrites dict_path with "" here — confirm intent.
    dict_path = ""
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config)
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: fixed the obfuscated version which assigned the parser/args to a
    # placeholder name and then referenced the undefined `parser`/`args`.
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 394 |
# Size of the rolling-hash alphabet (extended ASCII); referenced by rabin_karp().
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str, alphabet_size: int = 256, modulus: int = 1_000_003) -> bool:
    """
    Return True if `pattern` occurs in `text`, using the Rabin-Karp rolling hash.

    The hash parameters are exposed as keyword arguments (defaulting to the
    module-level constants) so the routine is self-contained and tunable.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        # Hash equality is only a candidate match; confirm with a direct comparison.
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    """Self-test for rabin_karp(); name restored — the __main__ guard calls it."""
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5) non-ASCII input
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
# Run the self-test when executed as a script.
if __name__ == "__main__":
test_rabin_karp()
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    """
    Helper that holds the image-processor configuration used by the test class below.
    Name restored — the test class instantiates it as MobileViTImageProcessingTester.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct a MobileViTImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """
    Tests for MobileViTImageProcessor. Method names restored from the HF test-suite
    conventions — the obfuscated source collapsed every method to one placeholder name.
    """

    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''size'''))
        self.assertTrue(hasattr(image_processing, '''do_center_crop'''))
        self.assertTrue(hasattr(image_processing, '''center_crop'''))
        self.assertTrue(hasattr(image_processing, '''do_flip_channel_order'''))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 20})
        self.assertEqual(image_processor.crop_size, {'''height''': 18, '''width''': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'''shortest_edge''': 42})
        self.assertEqual(image_processor.crop_size, {'''height''': 84, '''width''': 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ),
        )
| 703 |
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
# Must be named `logger` — the conversion functions below log through it.
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """
    Deserialize a Flax checkpoint file and load its weights into `pt_model`,
    with a friendlier error for git-lfs pointer files and undecodable blobs.
    """
    try:
        with open(model_file, '''rb''') as flax_state_f:
            # NOTE(review): first from_bytes argument restored as the model template — confirm.
            flax_state = from_bytes(pt_model, flax_state_f.read())
    except UnpicklingError as e:
        try:
            # A text file starting with "version" is a git-lfs pointer, not real weights.
            with open(model_file) as f:
                if f.read().startswith('''version'''):
                    raise OSError(
                        '''You seem to have cloned a repository without having git-lfs installed. Please'''
                        ''' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'''
                        ''' folder you cloned.'''
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(F'Unable to convert {model_file} to Flax deserializable object. ')
    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def lowerCAmelCase_ ( pt_model , flax_state ):
    """Load the deserialized Flax parameter pytree *flax_state* into the PyTorch
    model *pt_model* and return the model.

    Conversions applied: 4-D conv ``kernel`` -> NCHW ``weight``; dense ``kernel`` ->
    transposed ``weight``; layer-norm ``scale`` -> ``weight``; and the ``_N`` ->
    ``.N`` block renaming outside ``time_embedding``.

    Fixes vs. the obfuscated source: duplicate parameter names (SyntaxError),
    ``jnp.bfloataa``/``np.floataa`` (nonexistent attributes) restored to
    ``bfloat16``/``float32``, and the dropped ``flax_key_tuple_array[i] = ...``
    subscript assignment restored.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            '''Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.''' )
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
            '''before loading those in PyTorch model.''' )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
    flax_state_dict = flatten_dict(flax_state , sep='''.''' )
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys() )
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split('''.''' )
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            # conv kernel: HWIO -> OIHW
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
            flax_tensor = jnp.transpose(flax_tensor , (3, 2, 0, 1) )
        elif flax_key_tuple_array[-1] == "kernel":
            # dense kernel: transpose to match torch.nn.Linear layout
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ['''weight''']
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array ):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace('''_0''' , '''.0''' )
                    .replace('''_1''' , '''.1''' )
                    .replace('''_2''' , '''.2''' )
                    .replace('''_3''' , '''.3''' )
                    .replace('''_4''' , '''.4''' )
                    .replace('''_5''' , '''.5''' )
                    .replace('''_6''' , '''.6''' )
                    .replace('''_7''' , '''.7''' )
                    .replace('''_8''' , '''.8''' )
                    .replace('''_9''' , '''.9''' )
                )
        flax_key = '''.'''.join(flax_key_tuple_array )
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
                    f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor ) if not isinstance(flax_tensor , np.ndarray ) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor )
                # remove from missing keys
                missing_keys.remove(flax_key )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key )
    pt_model.load_state_dict(pt_model_dict )
    # re-transform missing_keys to list
    missing_keys = list(missing_keys )
    if len(unexpected_keys ) > 0:
        logger.warning(
            '''Some weights of the Flax model were not used when initializing the PyTorch model'''
            f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
            f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
            ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
            f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
            ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
            ''' FlaxBertForSequenceClassification model).''' )
    if len(missing_keys ) > 0:
        logger.warning(
            f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
            f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
            ''' use it for predictions and inference.''' )
    return pt_model
| 363 | 0 |
def __lowerCAmelCase ( grid , row , col , visit ) -> int:
    """Count the simple paths from (row, col) to the bottom-right cell of *grid*,
    moving in the four cardinal directions without revisiting a cell.

    Args:
        grid: rectangular matrix; a cell equal to 1 is an obstacle.
        row, col: current position (out-of-range positions contribute 0 paths).
        visit: set of (row, col) cells on the current path; mutated and restored.

    Fixes vs. the obfuscated source: all four parameters shared one name
    (a SyntaxError), ``grid`` was undefined, and the recursion targeted the
    undefined name ``depth_first_search`` — it now calls this function itself.
    """
    row_length, col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col) )
    count = 0
    count += __lowerCAmelCase(grid , row + 1 , col , visit )
    count += __lowerCAmelCase(grid , row - 1 , col , visit )
    count += __lowerCAmelCase(grid , row , col + 1 , visit )
    count += __lowerCAmelCase(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 306 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
# Fairseq reorganised its model attributes in 1.0.0a; older releases are unsupported.
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
    raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
# NOTE(review): ``_lowercase`` is immediately rebound here, discarding the logger —
# presumably two distinct constants (logger and SAMPLE_TEXT) collapsed by renaming.
_lowercase = 'Hello world! cécé herlolip'
def __lowerCAmelCase ( roberta_checkpoint_path , pytorch_dump_folder_path , classification_head ):
    """Convert a fairseq XLM-RoBERTa-XL checkpoint to the transformers layout.

    Args:
        roberta_checkpoint_path: path to the official fairseq dump.
        pytorch_dump_folder_path: where to save the converted model.
        classification_head: also convert the final ``mnli`` classification head.

    Fixes vs. the obfuscated source: the three parameters shared one name
    (a SyntaxError), every weight-copy assignment target had been replaced by a
    throwaway local (so nothing was copied), and ``fc1``/``fc2`` had both been
    renamed to the nonexistent ``fca``. Targets restored from the upstream script.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path )
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]

    print("""Our RoBERTa config:""" , config )

    model = XLMRobertaXLForSequenceClassification(config ) if classification_head else XLMRobertaXLForMaskedLM(config )
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["""mnli"""].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["""mnli"""].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["""mnli"""].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["""mnli"""].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    # (the sample-text module constant was clobbered by the renaming, so it is inlined)
    input_ids: torch.Tensor = roberta.encode('Hello world! cécé herlolip' ).unsqueeze(0 )  # batch of size 1

    our_output = model(input_ids )[0]
    if classification_head:
        their_output = roberta.model.classification_heads["""mnli"""](roberta.extract_features(input_ids ) )
    else:
        their_output = roberta.model(input_ids )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f"""max_absolute_diff = {max_absolute_diff}""" )  # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1E-3 )
    print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
    if not success:
        raise Exception("""Something went wRoNg""" )

    pathlib.Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # Fixes vs. the obfuscated source: ``parser`` and ``args`` were assigned to a
    # throwaway name and then referenced as undefined names.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--classification_head', action='store_true', help='Whether to convert a final classification head.'
    )
    args = parser.parse_args()
    # The converter above is defined under the obfuscated name ``__lowerCAmelCase``;
    # bind it to its conventional name before calling.
    convert_xlm_roberta_xl_checkpoint_to_pytorch = __lowerCAmelCase
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 306 | 1 |
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def __lowerCAmelCase ( dataset , expected_features ):
    """Assert *dataset* matches the canonical 4-row / 3-column SQL fixture and that
    each column carries the expected dtype.

    Fix: the obfuscated source gave both parameters the same name (a SyntaxError).
    """
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def __lowerCAmelCase ( keep_in_memory , sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    """Reading the SQL fixture with/without ``keep_in_memory`` yields the expected dataset.

    NOTE(review): the obfuscation collapsed every test in this section onto the name
    ``__lowerCAmelCase`` (so pytest will not collect them); the ``_check_sql_dataset``
    call keeps the helper's conventional name — verify the binding when de-obfuscating.
    Fix: all four parameters shared one name (a SyntaxError); pytest fixture names restored.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset" , "sqlite:///" + sqlite_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_sql_dataset(dataset , expected_features )
@require_sqlalchemy
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] , )
def __lowerCAmelCase ( features , sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    """Casting the SQL fixture to each feature schema produces the matching dtypes.

    Fix: all four parameters shared one name (a SyntaxError); the ``Value`` argument
    was the parameter name instead of the per-column ``dtype``.
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , features=features , cache_dir=cache_dir ).read()
    # NOTE(review): the checker helper is defined under an obfuscated name in this file.
    _check_sql_dataset(dataset , expected_features )
def __lowerCAmelCase ( sqlite_path ):
    """Yield every row of the ``dataset`` table in the SQLite database at *sqlite_path*.

    Fix: the obfuscated source referenced the undefined module ``sqlitea``; this is
    the standard-library ``sqlite3``, imported locally because the file's top-level
    import statement is garbled the same way.
    """
    import sqlite3

    with contextlib.closing(sqlite3.connect(sqlite_path ) ) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset" )
        for row in cur:
            yield row
@require_sqlalchemy
def __lowerCAmelCase ( sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    """Round-trip: write the dataset back to SQLite (single process) and compare rows.

    Fixes: the three parameters shared one name (a SyntaxError), and the loop
    unpacked both rows into the same variable, making the assert vacuous.
    """
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir , "tmp.sql" )
    dataset = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=cache_dir ).read()
    SqlDatasetWriter(dataset , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=1 ).write()

    # NOTE(review): ``iter_sql_file`` is defined under an obfuscated name in this file.
    original_sql = iter_sql_file(sqlite_path )
    expected_sql = iter_sql_file(output_sqlite_path )
    for row1, row2 in zip(original_sql , expected_sql ):
        assert row1 == row2
@require_sqlalchemy
def __lowerCAmelCase ( sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    """Round-trip: write the dataset back to SQLite with two processes and compare rows.

    Fixes: the three parameters shared one name (a SyntaxError), and the loop
    unpacked both rows into the same variable, making the assert vacuous.
    """
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir , "tmp.sql" )
    dataset = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=cache_dir ).read()
    SqlDatasetWriter(dataset , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=2 ).write()

    # NOTE(review): ``iter_sql_file`` is defined under an obfuscated name in this file.
    original_sql = iter_sql_file(sqlite_path )
    expected_sql = iter_sql_file(output_sqlite_path )
    for row1, row2 in zip(original_sql , expected_sql ):
        assert row1 == row2
@require_sqlalchemy
def __lowerCAmelCase ( sqlite_path , tmp_path , set_sqlalchemy_silence_uber_warning ):
    """``num_proc=0`` is invalid and the writer must raise ``ValueError``.

    Fixes: the three parameters shared one name (a SyntaxError), and the expected
    exception type had been replaced by the parameter name; ``ValueError`` restored.
    """
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir , "tmp.sql" )
    dataset = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=cache_dir ).read()
    with pytest.raises(ValueError ):
        SqlDatasetWriter(dataset , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=0 ).write()
| 206 |
def __lowerCAmelCase ( number ):
    """Return True iff *number* is a perfect number (equal to the sum of its proper divisors).

    Fixes: the parameter was renamed away from ``number``, which the body still
    referenced (NameError); non-positive inputs would also satisfy the naive
    divisor-sum check (sum == 0 == number) and are now rejected explicitly.
    """
    if number <= 0:
        return False
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
    print('Program to check whether a number is a Perfect number or not...')
    # ``perfect`` is the conventional name of the predicate defined (obfuscated)
    # just above; bind it so the f-string below resolves.
    perfect = __lowerCAmelCase
    number = int(input('Enter number: ').strip())
    _SCREAMING_SNAKE_CASE : str = number  # keep the original module-level alias
    print(f'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
| 206 | 1 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _lowerCAmelCase ( ):
    """Patching ``os.path.join`` must patch every alias through which it is
    reachable and restore everything once the context exits.

    NOTE(review): every test in this section shares one obfuscated name, so only
    the last definition survives at module level — presumably they originally had
    distinct ``test_*`` names.
    Fix: the patch value was the undefined name ``UpperCamelCase_``; it is the
    mock string assigned just above.
    """
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching , """os.path.join""" , mock ):
        # Every way to access os.path.join must be patched, and the rest must stay untouched
        # check os.path.join
        assert isinstance(_test_patching.os , _PatchedModuleObj )
        assert isinstance(_test_patching.os.path , _PatchedModuleObj )
        assert _test_patching.os.path.join is mock
        # check path.join
        assert isinstance(_test_patching.path , _PatchedModuleObj )
        assert _test_patching.path.join is mock
        # check join
        assert _test_patching.join is mock
        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname
        # Even renamed modules or objects must be patched
        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
        assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
        assert _test_patching.renamed_os.path.join is mock
        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
        assert _test_patching.renamed_path.join is mock
        # check renamed_join
        assert _test_patching.renamed_join is mock
        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everthing is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join
def _lowerCAmelCase ( ):
    """Patching a builtin that appears in the target module's globals (``open``).

    Fix: the patch value was the undefined name ``UpperCamelCase_``; it is the
    mock string assigned below.
    """
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , """open""" , mock ):
        assert _test_patching.open is mock

    # check that everthing is back to normal when the patch is over
    assert _test_patching.open is open
def _lowerCAmelCase ( ):
    """Patching an attribute absent from the target module must be a no-op.

    Fix: the patch value was the undefined name ``UpperCamelCase_``.
    """
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching , """pandas.read_csv""" , mock ):
        pass
def _lowerCAmelCase ( ):
    """A builtin that is *not* in the module globals is still patched while the
    context is active (builtins are resolved lazily).

    Fixes: the ``getattr`` default and the patch value were both the undefined
    name ``UpperCamelCase_``; they are ``None`` and the mock string respectively.
    """
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , """len""" , None ) is None
    with patch_submodule(_test_patching , """len""" , mock ):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def _lowerCAmelCase ( ):
    """``patch_submodule`` objects support explicit ``start()`` / ``stop()`` control.

    Fixes: the patch value was the undefined name ``UpperCamelCase_`` and the
    patch object was bound to a throwaway local while the body called the
    undefined name ``patch``.
    """
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching , """open""" , mock )

    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def _lowerCAmelCase ( ):
    """Nested patches of sibling attributes compose and unwind correctly in any order.

    Fixes: the three mock constants had been collapsed into one rebound local and
    every patch value was the undefined name ``UpperCamelCase_``; the distinct
    ``mock_join``/``mock_dirname``/``mock_rename`` bindings the asserts reference
    are restored.
    """
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching , """os.path.join""" , mock_join ):
        with patch_submodule(_test_patching , """os.rename""" , mock_rename ):
            with patch_submodule(_test_patching , """os.path.dirname""" , mock_dirname ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching , """os.rename""" , mock_rename ):
        with patch_submodule(_test_patching , """os.path.join""" , mock_join ):
            with patch_submodule(_test_patching , """os.path.dirname""" , mock_dirname ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def _lowerCAmelCase ( ):
    """Patching through a dotted path whose parents do not exist must not raise.

    Fix: the patch value was the undefined name ``UpperCamelCase_``.
    """
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , mock ):
        pass
    with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , mock ):
        pass
| 155 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
# NOTE(review): the same obfuscated name is rebound here, discarding the logger —
# presumably this mapping was originally a separate PRETRAINED_CONFIG_ARCHIVE_MAP
# constant; verify before relying on either binding.
__magic_name__ = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class SCREAMING_SNAKE_CASE_ ( PretrainedConfig ):
    """Configuration for a Speech2Text encoder-decoder model.

    Fixes vs. the obfuscated source: the base class was the undefined name ``__a``
    (``PretrainedConfig`` matches the module's import); the three class attributes
    and every ``__init__`` parameter shared one obfuscated name (duplicate
    parameters are a SyntaxError), so the conventional names are restored in the
    original default order.
    """

    model_type = '''speech_to_text'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    # maps common config attribute names onto this model's names
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`.")

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 155 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Fixes vs. the obfuscated source: the three module constants were all renamed to
# ``__A`` (each rebinding the previous one), which left ``logger`` and
# ``MODEL_CONFIG_CLASSES`` — both referenced later in this file — undefined.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')

require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
# Backward-compatible alias: other obfuscated code in this file refers to ``__A``.
__A = MODEL_TYPES
@dataclass
class _UpperCAmelCase :
    """Arguments pertaining to what data we are going to input our model for training and eval.

    Fixes vs. the obfuscated source: every field shared one name (so only the last
    survived), ``default=_A`` referenced an undefined name (restored to ``None``),
    and the ``__post_init__`` dataclass hook had been renamed to ``A`` with its
    ``data_files`` dict assignments reduced to no-op locals.
    """

    dataset_name: Optional[str] = field(
        default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    image_column_name: Optional[str] = field(
        default=None , metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."} , )
    train_dir: Optional[str] = field(default=None , metadata={"help": "A folder containing the training data."} )
    validation_dir: Optional[str] = field(default=None , metadata={"help": "A folder containing the validation data."} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
    mask_patch_size: int = field(default=32 , metadata={"help": "The size of the square patches to use for masking."} )
    mask_ratio: float = field(
        default=0.6 , metadata={"help": "Percentage of patches to mask."} , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )

    def __post_init__(self):
        # Build the mapping consumed by ``load_dataset`` from the optional folders.
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class _UpperCAmelCase :
    """Arguments pertaining to which model/config we are going to pretrain.

    Fixes vs. the obfuscated source: every field shared one name (so only the last
    survived) and ``default=_A`` / ``", ".join(_A)`` referenced an undefined name;
    defaults restored to ``None``/``False`` and the model-type list to the module's
    (obfuscated) ``__A`` tuple.
    """

    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(__A )} , )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    config_overrides: Optional[str] = field(
        default=None , metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        } , )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"} , )
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    image_processor_name: Optional[str] = field(default=None , metadata={"help": "Name or path of preprocessor config."} )
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    image_size: Optional[int] = field(
        default=None , metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        } , )
    patch_size: Optional[int] = field(
        default=None , metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        } , )
    encoder_stride: Optional[int] = field(
        default=None , metadata={"help": "Stride to use for the encoder."} , )
class _UpperCAmelCase :
    """Generate the boolean masked-position map for SimMIM-style masked image modeling.

    The image (``input_size`` px square) is split into ``mask_patch_size`` px
    patches; a random ``mask_ratio`` fraction of them is masked, and the map is
    upsampled to the model's ``model_patch_size`` grid.

    Fixes vs. the obfuscated source: the four ``__init__`` parameters shared one
    name (a SyntaxError), ``dtype=A`` referenced an undefined name (``int``), and
    the ``mask[mask_idx] = 1`` subscript assignment had been reduced to a no-op local.
    """

    def __init__(self , input_size=192 , mask_patch_size=32 , model_patch_size=4 , mask_ratio=0.6 ):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError('''Input size must be divisible by mask patch size''' )
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError('''Mask patch size must be divisible by model patch size''' )

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio ) )

    def __call__(self ):
        # choose mask_count random patch positions, set them to 1, then upsample
        mask_idx = np.random.permutation(self.token_count )[: self.mask_count]
        mask = np.zeros(self.token_count , dtype=int )
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size) )
        mask = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )

        return torch.tensor(mask.flatten() )
def lowercase ( examples ):
    """Collate a batch for masked-image-modeling training: stack each example's
    pixel values and mask into batch tensors.

    Fix: the parameter had been renamed away from ``examples``, which the body
    still referenced (NameError).
    """
    pixel_values = torch.stack([example['''pixel_values'''] for example in examples] )
    mask = torch.stack([example['''mask'''] for example in examples] )
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def lowercase ( ):
    """Entry point for SimMIM-style masked image modeling pre-training.

    Parses the (model, data, training) argument dataclasses, loads the image
    dataset, builds the config / image processor / model, wires up the random
    patch-mask generator and image transforms, and runs ``Trainer`` for
    training and/or evaluation.

    NOTE(review): the obfuscated original bound every intermediate value to the
    same dead local and then referenced undefined names (``parser``, ``ds``,
    ``config``, ``trainer`` ...); the bindings below restore the names the rest
    of the function actually uses.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('''.json'''):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_mim''', model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
        datefmt='''%m/%d/%Y %H:%M:%S''',
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
    logger.info(f'''Training/evaluation parameters {training_args}''' )

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if '''validation''' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds['''train'''].train_test_split(data_args.train_val_split)
        ds['''train'''] = split['''train''']
        ds['''validation'''] = split['''test''']

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        '''cache_dir''': model_args.cache_dir,
        '''revision''': model_args.model_revision,
        '''use_auth_token''': True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('''You are instantiating a new config instance from scratch.''')
        if model_args.config_overrides is not None:
            logger.info(f'''Overriding config: {model_args.config_overrides}''')
            config.update_from_string(model_args.config_overrides)
            logger.info(f'''New config: {config}''')

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, '''decoder_type'''):
        config.decoder_type = '''simmim'''

    # adapt config: fall back to the configuration's values when not given
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            '''image_size''': model_args.image_size,
            '''patch_size''': model_args.patch_size,
            '''encoder_stride''': model_args.encoder_stride,
        } )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool('''.ckpt''' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info('''Training new model from scratch''')
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds['''train'''].column_names
    else:
        column_names = ds['''validation'''].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = '''image'''
    elif "img" in column_names:
        image_column_name = '''img'''
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert('''RGB''') if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ] )

    # create mask generator (the `_UpperCAmelCase` mask-generator class defined above)
    mask_generator = _UpperCAmelCase(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        # Apply the SimMIM transforms and attach a fresh random mask per image.
        examples['''pixel_values'''] = [transforms(image) for image in examples[image_column_name]]
        examples['''mask'''] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    def collate_fn(examples):
        # Stack the already-transformed tensors into a batch; the mask goes in
        # under the key name expected by the model.
        pixel_values = torch.stack([example['''pixel_values'''] for example in examples])
        mask = torch.stack([example['''mask'''] for example in examples])
        return {"pixel_values": pixel_values, "bool_masked_pos": mask}

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError('''--do_train requires a train dataset''')
        if data_args.max_train_samples is not None:
            ds['''train'''] = ds['''train'''].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError('''--do_eval requires a validation dataset''')
        if data_args.max_eval_samples is not None:
            ds['''validation'''] = (
                ds['''validation'''].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds['''train'''] if training_args.do_train else None,
        eval_dataset=ds['''validation'''] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics('''train''', train_result.metrics)
        trainer.save_metrics('''train''', train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('''eval''', metrics)
        trainer.save_metrics('''eval''', metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        '''finetuned_from''': model_args.model_name_or_path,
        '''tasks''': '''masked-image-modeling''',
        '''dataset''': data_args.dataset_name,
        '''tags''': ['''masked-image-modeling'''],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
    # NOTE(review): the original called an undefined `main()`; the training
    # entry point defined above is (obfuscated as) `lowercase`.
    lowercase()
| 703 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class _UpperCAmelCase ( ProcessorMixin ):
    """OWL-ViT processor wrapping an image processor and a CLIP tokenizer.

    NOTE(review): reconstructed from an obfuscated original in which all class
    attributes and helper methods shared one name (overwriting each other),
    results were bound to dead locals instead of the returned ``BatchEncoding``,
    and the base class name (`_A`) was undefined — `ProcessorMixin` (imported
    above) is used instead.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('''feature_extractor''')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        """Tokenize text queries and/or preprocess (query) images for OWL-ViT.

        Returns a ``BatchEncoding`` holding ``input_ids``/``attention_mask``
        for text, ``query_pixel_values`` for query images and ``pixel_values``
        for target images, depending on which arguments are provided.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                '''You have to specify at least one text or query image or image. All three cannot be none.''')

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [''' '''] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''')

            if return_tensors == "np":
                input_ids = np.concatenate([encoding['''input_ids'''] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding['''attention_mask'''] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding['''input_ids'''] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding['''attention_mask'''] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding['''input_ids'''] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding['''attention_mask'''] for encoding in encodings], axis=0)
            else:
                raise ValueError('''Target return tensor type could not be returned''')

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        # Forwarded to the image processor.
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''',
            FutureWarning,
        )
        return self.image_processor
| 141 | 0 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class A ( AbstractArchiveFileSystem ):
    """Read-only fsspec filesystem exposing one compressed file as a single
    archive member named after the file with its compression extension removed.

    NOTE(review): reconstructed from an obfuscated original whose `__init__`
    repeated one parameter name (a SyntaxError), whose attributes were bound to
    dead locals, and whose base class name (`lowerCamelCase_`) was undefined —
    `AbstractArchiveFileSystem` (imported above) is used instead.
    """

    root_marker = ""
    # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    protocol = None
    compression = None  # compression type in fsspec. ex: "gzip"
    extension = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(self, fo="", target_protocol=None, target_options=None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        # Lazily build the single-entry directory listing for the archive.
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path):
        # There is only one member; return its full decompressed contents.
        return self.file.open().read()

    def _open(self, path, mode="rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class A ( lowerCamelCase_ ):
    """bz2 compressed-file filesystem subclass.

    NOTE(review): obfuscation artifact — all three attribute bindings below
    share the name `lowerCamelCase`, so only the last survives, and the base
    class `lowerCamelCase_` is undefined here; presumably these are the
    `protocol`/`compression`/`extension` attributes of the compressed-file
    base filesystem above — confirm against upstream.
    """
    lowerCamelCase : Tuple = """bz2"""  # protocol
    lowerCamelCase : str = """bz2"""  # compression
    lowerCamelCase : str = """.bz2"""  # extension stripped from the file name
class A ( lowerCamelCase_ ):
    """gzip compressed-file filesystem subclass.

    NOTE(review): obfuscation artifact — all three attribute bindings below
    share the name `lowerCamelCase` (only the last survives) and the base
    class `lowerCamelCase_` is undefined here; presumably
    `protocol`/`compression`/`extension` upstream — confirm.
    """
    lowerCamelCase : List[Any] = """gzip"""  # protocol
    lowerCamelCase : Tuple = """gzip"""  # compression
    lowerCamelCase : int = """.gz"""  # extension stripped from the file name
class A ( lowerCamelCase_ ):
    """lz4 compressed-file filesystem subclass.

    NOTE(review): obfuscation artifact — all three attribute bindings below
    share the name `lowerCamelCase` (only the last survives) and the base
    class `lowerCamelCase_` is undefined here; presumably
    `protocol`/`compression`/`extension` upstream — confirm.
    """
    lowerCamelCase : List[Any] = """lz4"""  # protocol
    lowerCamelCase : str = """lz4"""  # compression
    lowerCamelCase : int = """.lz4"""  # extension stripped from the file name
class A ( lowerCamelCase_ ):
    """xz compressed-file filesystem subclass.

    NOTE(review): obfuscation artifact — all three attribute bindings below
    share the name `lowerCamelCase` (only the last survives) and the base
    class `lowerCamelCase_` is undefined here; presumably
    `protocol`/`compression`/`extension` upstream — confirm.
    """
    lowerCamelCase : int = """xz"""  # protocol
    lowerCamelCase : List[str] = """xz"""  # compression
    lowerCamelCase : List[str] = """.xz"""  # extension stripped from the file name
class A ( lowerCamelCase_ ):
    """Zstandard compressed-file filesystem subclass.

    NOTE(review): reconstructed from an obfuscated original whose `__init__`
    repeated one parameter name five times (a SyntaxError) and bound the
    `__enter__` patch to dead locals.  The base class name `lowerCamelCase_`
    is undefined in this file — presumably the compressed-file base
    filesystem above; confirm against upstream.
    """

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(self, fo, mode="rb", target_protocol=None, target_options=None, block_size=DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            """Proxy that forwards everything to the wrapped file object."""

            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            # Wrap whatever the original __enter__ returns.
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
| 226 |
def __lowerCAmelCase ( __magic_name__ ):
if not isinstance(__magic_name__ , __magic_name__ ):
raise TypeError("only integers accepted as input" )
else:
_lowercase: Optional[Any] = str(abs(__magic_name__ ) )
_lowercase: Tuple = [list(__magic_name__ ) for char in range(len(__magic_name__ ) )]
for index in range(len(__magic_name__ ) ):
num_transpositions[index].pop(__magic_name__ )
return max(
int("".join(list(__magic_name__ ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    __import__('doctest').testmod()
| 226 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), 'Tatoeba directory does not exist.')
class __a ( unittest.TestCase ):
    """Integration tests for the Tatoeba -> Marian checkpoint converter.

    NOTE(review): reconstructed from an obfuscated original where the skip
    condition referenced an undefined name (`_snake_case`; `DEFAULT_REPO` is
    imported above), all three methods shared one name (overwriting each
    other), and results were bound to dead locals.
    """

    @cached_property
    def resolver(self):
        # Converted models are written to a throwaway directory.
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["""heb-eng"""])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("""opus-mt-he-en""", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 572 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( snake_case_ = 100 ):
    """Project Euler 29: count distinct terms a**b for 2 <= a, b <= n.

    NOTE(review): the obfuscated original bound the set, the adjusted limit and
    each power to the same dead local and looped with an exclusive upper bound;
    the loops below use the inclusive bound its comment described.
    """
    collect_powers = set()
    upper_limit = snake_case_ + 1  # maximum limit (exclusive range bound)
    for a in range(2, upper_limit):
        for b in range(2, upper_limit):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
if __name__ == "__main__":
    # NOTE(review): the original called an undefined `solution(...)`; the
    # function above is (obfuscated as) `_SCREAMING_SNAKE_CASE`.
    print('Number of terms ', _SCREAMING_SNAKE_CASE(int(str(input()).strip())))
| 572 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
# NOTE(review): in the obfuscated original all four module constants were bound
# to the same name `__A`, so only the last assignment survived; the tokenizer
# class below reads them under the canonical names restored here.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# Hub URLs of the vocabulary/merges/tokenizer files per pretrained checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input length (in tokens) per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}
class __lowerCAmelCase ( PreTrainedTokenizerFast ):
    """Fast RoBERTa tokenizer backed by HuggingFace *tokenizers* (byte-level BPE).

    NOTE(review): reconstructed from an obfuscated original in which the base
    class name (`a__`) was undefined (`PreTrainedTokenizerFast` is imported
    above), every class attribute shared one name (overwriting each other),
    all methods shared one name, and two methods repeated a parameter name
    (a SyntaxError).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Make sure the byte-level pre-tokenizer matches the requested
        # `add_prefix_space` setting.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self):
        """The mask token; logs an error and returns None if it is unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Strip the space on the left so the mask token behaves like a word
        # that includes the preceding space.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # <s> A </s> (</s> B </s> for pairs)
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        # RoBERTa does not use token type ids; everything is segment 0.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 656 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
# NOTE(review): in the obfuscated original every module variable was bound to
# the same name `__UpperCAmelCase`, so later references (`vocab`, `merges`,
# `build_dir`, `config`, ...) were unbound; the names below restore the ones
# the script actually uses.
mname_tiny = '''tiny-wmt19-en-ru'''

# Build

# borrowed from a test
vocab = [
    '''l''',
    '''o''',
    '''w''',
    '''e''',
    '''r''',
    '''s''',
    '''t''',
    '''i''',
    '''d''',
    '''n''',
    '''w</w>''',
    '''r</w>''',
    '''t</w>''',
    '''lo''',
    '''low''',
    '''er</w>''',
    '''low</w>''',
    '''lowest</w>''',
    '''newer</w>''',
    '''wider</w>''',
    '''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
    merges_file = build_dir / VOCAB_FILES_NAMES['''merges_file''']
    with open(src_vocab_file, '''w''') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, '''w''') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, '''w''') as fp:
        fp.write('''\n'''.join(merges))

    # The tokenizer reads the files while the temp dir still exists.
    tokenizer = FSMTTokenizer(
        langs=['''en''', '''ru'''],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=['''ru''', '''en'''],
    src_vocab_size=1_000,
    tgt_vocab_size=1_000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")

# Test
batch = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
outputs = tiny_model(**batch)

print('''test output:''', len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"""Generated {mname_tiny}""")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
from manim import *
class UpperCAmelCase_ ( Scene ):  # was `_A` (undefined); manim scenes subclass Scene
    """Manim scene animating how checkpoint weights are placed on CPU/GPU/disk.

    NOTE(review): a corrupting rewrite collapsed every local variable to
    SCREAMING_SNAKE_CASE and most call arguments to UpperCamelCase__, so the
    statements below read names that are never bound (cpu, gpu, model,
    cpu_target, fill, ...) and the original direction constants (UP/DOWN/...)
    are unrecoverable.  The statement sequence is kept byte-identical; only the
    undefined base class / typing annotations and the hook name were repaired.
    """

    def construct(self):  # manim's entry hook; annotations referenced undefined Optional/Union
        """Build all mobjects and play the load/offload animation."""
        # Base cells: a memory cell, a smaller meta-device cell, and a fill square.
        SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 )
        SCREAMING_SNAKE_CASE = Rectangle(height=0.25 , width=0.25 )
        SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU block: two columns of 6 cells plus a "CPU" label.
        SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
        SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
        SCREAMING_SNAKE_CASE = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
        SCREAMING_SNAKE_CASE = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
        SCREAMING_SNAKE_CASE = VGroup(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
        SCREAMING_SNAKE_CASE = Text("""CPU""" , font_size=24 )
        SCREAMING_SNAKE_CASE = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(UpperCamelCase__ )
        # GPU block: 4 cells plus a "GPU" label.
        SCREAMING_SNAKE_CASE = [mem.copy() for i in range(4 )]
        SCREAMING_SNAKE_CASE = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
        SCREAMING_SNAKE_CASE = Text("""GPU""" , font_size=24 )
        SCREAMING_SNAKE_CASE = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
        gpu.move_to([-1, -1, 0] )
        self.add(UpperCamelCase__ )
        # Model block: 6 cells plus a "Model" label.
        SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
        SCREAMING_SNAKE_CASE = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
        SCREAMING_SNAKE_CASE = Text("""Model""" , font_size=24 )
        SCREAMING_SNAKE_CASE = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
        model.move_to([3, -1.0, 0] )
        self.add(UpperCamelCase__ )
        # Overlay small "empty model" fill targets next to/onto the CPU cells.
        SCREAMING_SNAKE_CASE = []
        SCREAMING_SNAKE_CASE = []
        SCREAMING_SNAKE_CASE = []
        for i, rect in enumerate(UpperCamelCase__ ):
            rect.set_stroke(UpperCamelCase__ )
            SCREAMING_SNAKE_CASE = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCamelCase__ , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCamelCase__ )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=UpperCamelCase__ , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=UpperCamelCase__ , buff=0.0 )
            self.add(UpperCamelCase__ )
            model_cpu_arr.append(UpperCamelCase__ )
        self.add(*UpperCamelCase__ , *UpperCamelCase__ , *UpperCamelCase__ )
        # Checkpoint block: 6 cells plus a "Loaded Checkpoint" label.
        SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
        SCREAMING_SNAKE_CASE = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
        SCREAMING_SNAKE_CASE = Text("""Loaded Checkpoint""" , font_size=24 )
        SCREAMING_SNAKE_CASE = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(UpperCamelCase__ )
        # Filled targets for each checkpoint cell, mirrored into the CPU columns.
        SCREAMING_SNAKE_CASE = []
        SCREAMING_SNAKE_CASE = []
        for i, rect in enumerate(UpperCamelCase__ ):
            SCREAMING_SNAKE_CASE = fill.copy().set_fill(UpperCamelCase__ , opacity=0.7 )
            target.move_to(UpperCamelCase__ )
            ckpt_arr.append(UpperCamelCase__ )
            SCREAMING_SNAKE_CASE = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(UpperCamelCase__ )
        self.add(*UpperCamelCase__ , *UpperCamelCase__ )
        # Legend key (square + markup labels).
        SCREAMING_SNAKE_CASE = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        SCREAMING_SNAKE_CASE = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(UpperCamelCase__ , UpperCamelCase__ )
        SCREAMING_SNAKE_CASE = MarkupText(
            f"""<span fgcolor=\'{BLUE}\'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(UpperCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(UpperCamelCase__ )
        # Step caption + disk block (two columns of 6 meta cells).
        SCREAMING_SNAKE_CASE = MarkupText(
            f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        SCREAMING_SNAKE_CASE = [meta_mem.copy() for i in range(6 )]
        SCREAMING_SNAKE_CASE = [meta_mem.copy() for i in range(6 )]
        SCREAMING_SNAKE_CASE = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
        SCREAMING_SNAKE_CASE = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
        SCREAMING_SNAKE_CASE = VGroup(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
        SCREAMING_SNAKE_CASE = Text("""Disk""" , font_size=24 )
        SCREAMING_SNAKE_CASE = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(UpperCamelCase__ , run_time=3 ) , Write(UpperCamelCase__ , run_time=1 ) , Create(UpperCamelCase__ , run_time=1 ) )
        # Animate checkpoint cells shrinking onto the disk column.
        SCREAMING_SNAKE_CASE = []
        for i, rect in enumerate(UpperCamelCase__ ):
            SCREAMING_SNAKE_CASE = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(UpperCamelCase__ , run_time=1.5 ) )
        self.play(*UpperCamelCase__ )
        self.play(FadeOut(UpperCamelCase__ ) )
        # Second caption, then fade everything related to the checkpoint out.
        SCREAMING_SNAKE_CASE = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(UpperCamelCase__ , run_time=3 ) )
        self.play(
            FadeOut(UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ , *UpperCamelCase__ ) , )
        self.wait()
from __future__ import annotations
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[int] , a : int = 0 ) -> str:
SCREAMING_SNAKE_CASE = key
def _UpperCAmelCase ( self : Tuple , a : str , a : int ) -> list[str]:
assert isinstance(a , a ) and isinstance(a , a )
SCREAMING_SNAKE_CASE = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(a ) ^ key ) for ch in content]
def _UpperCAmelCase ( self : Union[str, Any] , a : str , a : int ) -> list[str]:
assert isinstance(a , a ) and isinstance(a , a )
SCREAMING_SNAKE_CASE = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(a ) ^ key ) for ch in content]
def _UpperCAmelCase ( self : Union[str, Any] , a : str , a : int = 0 ) -> str:
assert isinstance(a , a ) and isinstance(a , a )
SCREAMING_SNAKE_CASE = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
SCREAMING_SNAKE_CASE = """"""
for ch in content:
ans += chr(ord(a ) ^ key )
return ans
def _UpperCAmelCase ( self : Dict , a : str , a : int = 0 ) -> str:
assert isinstance(a , a ) and isinstance(a , a )
SCREAMING_SNAKE_CASE = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
SCREAMING_SNAKE_CASE = """"""
for ch in content:
ans += chr(ord(a ) ^ key )
return ans
def _UpperCAmelCase ( self : Optional[int] , a : str , a : int = 0 ) -> bool:
assert isinstance(a , a ) and isinstance(a , a )
try:
with open(a ) as fin, open("""encrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(a , a ) )
except OSError:
return False
return True
def _UpperCAmelCase ( self : Dict , a : str , a : int ) -> bool:
assert isinstance(a , a ) and isinstance(a , a )
try:
with open(a ) as fin, open("""decrypt.out""" , """w+""" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(a , a ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
'''simple docstring'''
import math
def lowercase__(_UpperCamelCase: int) -> bool:
    """Return True iff *_UpperCamelCase* is prime (6k±1 trial division)."""
    number = _UpperCamelCase
    if number < 2:
        # negatives, 0 and 1 are not prime
        return False
    if number < 4:
        # 2 and 3 are prime
        return True
    if number % 2 == 0 or number % 3 == 0:
        return False
    # Every prime > 3 has the form 6k ± 1; test divisors up to sqrt(number).
    limit = int(math.sqrt(number))
    candidate = 5
    while candidate <= limit:
        if number % candidate == 0 or number % (candidate + 2) == 0:
            return False
        candidate += 6
    return True
def lowercase__(_UpperCamelCase: float = 0.1) -> int:
    """Project Euler 58: smallest spiral side length whose diagonal prime
    ratio drops below *_UpperCamelCase*.

    Bug fixes: the corrupted version reassigned the parameter (``_UpperCamelCase = 3``)
    instead of initialising ``primes``/``j``, and called an undefined
    ``is_prime``; a local primality helper makes the function self-contained.
    """

    def _is_prime(number: int) -> bool:
        # 6k ± 1 trial division, mirroring the module's primality test.
        if 1 < number < 4:
            return True
        elif number < 2 or number % 2 == 0 or number % 3 == 0:
            return False
        for i in range(5, int(math.sqrt(number) + 1), 6):
            if number % i == 0 or number % (i + 2) == 0:
                return False
        return True

    primes = 3  # primes seen on the diagonals so far (3, 5, 7 on the first ring)
    j = 3  # current spiral side length; 2*j - 1 numbers lie on the diagonals
    while primes / (2 * j - 1) >= _UpperCamelCase:
        # The three non-square corners of the next ring (the 4th is (j+2)^2).
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += _is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Convert a TensorFlow RemBERT checkpoint to a PyTorch state dict.

    Args:
        tf_checkpoint_path: path to the TF checkpoint.
        rembert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: where to write the converted ``state_dict``.

    The corrupted version used one duplicated parameter name (a SyntaxError)
    and a function name that the ``__main__`` guard never called; both are
    restored here.
    """
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: three required path arguments, then run the conversion.
    # (The corrupted version bound the parser and parsed args to `lowercase_`
    # while the code below read `parser` / `args`.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
def merge_sort(collection: list) -> list:
    """Stable pure-Python merge sort; returns a new sorted list.

    The corrupted version named the function ``A__`` while both the recursive
    call and the ``__main__`` guard invoked ``merge_sort``; the original name
    is restored.
    """

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into one sorted list (consumes its inputs)."""

        def _merge():
            while left and right:
                # `<=` keeps equal elements in their original order (stability).
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Read a comma-separated list of integers from stdin and print it sorted.
    # (The corrupted version assigned both values to `lowerCAmelCase` while the
    # reads used `user_input` / `unsorted`.)
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __snake_case(AbstractDatasetReader):  # base was `__lowerCAmelCase` (undefined); the reader ABC is imported above
    """Dataset reader that builds a :class:`Dataset` from line-based text files
    via the packaged ``Text`` builder.

    Parameter and attribute names restored from the ``datasets`` reader
    contract (the corrupted version used one duplicated parameter name, a
    SyntaxError, and an obfuscated method name in place of ``read``).
    """

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalize to a {split: paths} mapping expected by the builder.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        """Build and return the dataset (streaming or fully prepared)."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__)
def save_model(model, dirpath):
    """Save *model* into *dirpath* via ``save_pretrained``.

    If the directory already exists, stale ``config.json`` /
    ``pytorch_model.bin`` files are removed first; otherwise the directory is
    created.  (Name and distinct parameters restored: the corrupted version
    duplicated the parameter name and was never callable as ``save_model``.)
    """
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Entropy of a categorical distribution along the last dimension.

    If *unlogit*, ``p`` is squared first.  Restored fixes: distinct parameter
    names (the corrupted def duplicated them) and the zeroing of ``0 * log 0``
    terms, which the corruption had collapsed to a bare ``__a = 0``.
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # 0 * log(0) -> 0 (torch.log(0) is -inf, product is nan)
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Log a 2D tensor row by row through the module logger.

    Floats are formatted to 5 decimals, integer (``torch.long``) tensors as
    plain ints.  (The corrupted def's parameter was never named ``tensor``
    although the body read that name.)
    """
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute per-head attention entropy and head importance scores.

    Importance follows Michel et al. 2019 (http://arxiv.org/abs/1905.10650):
    the absolute gradient of the loss w.r.t. a differentiable head mask,
    accumulated over the dataloader and normalized.  Returns
    ``(attn_entropy, head_importance, total_loss)``.

    (Reconstructed: the corrupted def duplicated all parameter names and
    collapsed the ranked-heads indexed assignment into a plain assignment.)
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    # Rank 0 = most important head (descending sort over the flattened scores).
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out the least important heads until the LM score drops
    below ``args.masking_threshold`` of the original; returns the final mask.

    (Reconstructed: the corrupted def duplicated parameter names and collapsed
    two indexed assignments — marking already-masked heads as +Inf and zeroing
    the newly masked entries — into plain assignments.)
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Physically remove the heads zeroed in *head_mask* and compare
    parameter count, score, and timing against the masked-only model.

    (Reconstructed from the corrupted version: duplicated parameter names and
    a collapsed ``heads_to_prune[k] = [v]`` assignment.)
    """
    # Timing/score with the mask applied but weights still present.
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    # layer -> list of head indices whose mask entry is 0 (to be pruned).
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # squeeze() collapses single-head layers to a bare int; re-wrap.
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    # Timing/score with the heads actually removed.
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    """CLI entry point: parse args, load GPT-2, build the token dataloader,
    compute head importance, and optionally mask/prune heads.

    NOTE(review): reconstructed from a corrupted dump in which every argparse
    ``type=``/``default=``/``required=`` value and every local name had been
    replaced by ``lowerCamelCase_``; the literals below (str/int/float, True,
    None) follow the help strings and standard argparse usage — verify against
    the upstream bertology example.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    # GPTaLMHeadModel is the (corrupted) alias this file imports for GPT2LMHeadModel.
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),  # token ids; dtype was corrupted to `np.intaa`
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
    # Script entry point: run the head importance / masking / pruning pipeline.
    main()
'''simple docstring'''
class Node:
    """Binary-search-tree node for tree sort.

    The corrupted version named the class ``_a`` and its method
    ``UpperCamelCase_`` while the rest of the file instantiates ``Node`` and
    calls ``.insert``; the original names are restored.
    """

    def __init__(self, val):
        self.val = val  # stored value (falsy val means "empty" for insert)
        self.left = None
        self.right = None

    def insert(self, val):
        """Insert *val* into the subtree rooted at this node (duplicates replace)."""
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val
def inorder(root, res):
    """In-order traversal of *root*, appending each value to list *res*.

    (The corrupted def duplicated both parameter names — a SyntaxError — while
    the body and callers already used ``inorder``/``root``/``res``.)
    """
    # Recurse left, visit node, recurse right -> ascending order for a BST.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)
def tree_sort(arr):
    """Sort *arr* by inserting every element into a BST and reading it back
    in order.  Returns a new list (or *arr* itself when empty).

    (Name restored: the ``__main__`` guard calls ``tree_sort`` but the def had
    been renamed, and the body read ``arr`` while the parameter was not named
    that.)
    """
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
    # Demo: sort a sample list via BST insertion + in-order traversal.
    print(tree_sort([1_0, 1, 3, 2, 9, 1_4, 1_3]))
'''simple docstring'''
from __future__ import annotations
class _lowerCamelCase :
'''simple docstring'''
def __init__( self , __lowercase ):
"""simple docstring"""
__A : Dict = order
# a_{0} ... a_{k}
__A : str = [1.0] + [0.0] * order
# b_{0} ... b_{k}
__A : Any = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
__A : Optional[int] = [0.0] * self.order
# y[n-1] ... y[n-k]
__A : Dict = [0.0] * self.order
def snake_case__ ( self , __lowercase , __lowercase ):
"""simple docstring"""
if len(__lowercase ) < self.order:
__A : List[Any] = [1.0, *a_coeffs]
if len(__lowercase ) != self.order + 1:
__A : Tuple = (
F"""Expected a_coeffs to have {self.order + 1} elements """
F"""for {self.order}-order filter, got {len(__lowercase )}"""
)
raise ValueError(__lowercase )
if len(__lowercase ) != self.order + 1:
__A : List[Any] = (
F"""Expected b_coeffs to have {self.order + 1} elements """
F"""for {self.order}-order filter, got {len(__lowercase )}"""
)
raise ValueError(__lowercase )
__A : Optional[int] = a_coeffs
__A : Optional[int] = b_coeffs
def snake_case__ ( self , __lowercase ):
"""simple docstring"""
__A : Union[str, Any] = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
__A : Optional[int] = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
__A : List[str] = self.input_history[:-1]
__A : Optional[int] = self.output_history[:-1]
__A : Optional[int] = sample
__A : Dict = result
return result
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    """Surface area of a cube: ``6 * a**2``. Raises ValueError if negative."""
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2
def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Surface area of a cuboid: ``2(lb + bh + lh)``. Raises ValueError if any side is negative."""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))
def surface_area_sphere(radius: float) -> float:
    """Surface area of a sphere: ``4 * pi * r**2``. Raises ValueError if negative."""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2
def surface_area_hemisphere(radius: float) -> float:
    """Total surface area of a hemisphere: ``3 * pi * r**2``. Raises ValueError if negative."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2
def surface_area_cone(radius: float, height: float) -> float:
    """Surface area of a cone: ``pi * r * (r + slant)`` with slant = hypot(h, r)."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Surface area of a conical frustum: lateral strip plus the two circular ends."""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def surface_area_cylinder(radius: float, height: float) -> float:
    """Surface area of a cylinder: ``2 * pi * r * (h + r)``."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)
def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Surface area of a (ring) torus: ``4 * pi**2 * R * r``.

    Raises:
        ValueError: for negative radii, or when ``R < r`` (spindle /
            self-intersecting tori are not supported).
    """
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius
def area_rectangle(length: float, width: float) -> float:
    """Area of a rectangle: ``length * width``."""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width
def area_square(side_length: float) -> float:
    """Return the area of a square: side^2.

    Raises:
        ValueError: if the side length is negative.
    """
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values')
    return side_length**2
def area_triangle(base: float, height: float) -> float:
    """Return the area of a triangle from its base and height: b * h / 2.

    Raises:
        ValueError: if the base or height is negative.
    """
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values')
    return (base * height) / 2
def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Return the area of a triangle given its three sides (Heron's formula).

    Raises:
        ValueError: if any side is negative, or if the sides violate the
            triangle inequality.
    """
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values')
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError('Given three sides do not form a triangle')
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3) )
    return area
def area_parallelogram(base: float, height: float) -> float:
    """Return the area of a parallelogram: base * height.

    Raises:
        ValueError: if the base or height is negative.
    """
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values')
    return base * height
def area_trapezium(base1: float, base2: float, height: float) -> float:
    """Return the area of a trapezium: (b1 + b2) * h / 2.

    Raises:
        ValueError: if any argument is negative.
    """
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values')
    return 1 / 2 * (base1 + base2) * height
def area_circle(radius: float) -> float:
    """Return the area of a circle: pi * r^2.

    Raises:
        ValueError: if the radius is negative.
    """
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values')
    return pi * radius**2
def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Return the area of an ellipse: pi * a * b (semi-axes a, b).

    Raises:
        ValueError: if either semi-axis is negative.
    """
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values')
    return pi * radius_x * radius_y
def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Return the area of a rhombus from its diagonals: d1 * d2 / 2.

    Raises:
        ValueError: if either diagonal is negative.
    """
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values')
    return 1 / 2 * diagonal_1 * diagonal_2
def area_reg_polygon(sides: int, length: float) -> float:
    """Return the area of a regular polygon: (n * L^2) / (4 * tan(pi / n)).

    Raises:
        ValueError: if ``sides`` is not an integer >= 3, or ``length`` is negative.
    """
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or \
        equal to three as number of sides' )
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as \
        length of a side' )
    # The duplicated, unreachable second return in the original was removed.
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True) # verbose so we can see methods missing tests
    # Demo of every helper. The f-string `=` specifier prints both the call
    # expression and its value (Python 3.8+).
    # NOTE(review): relies on the area_* / surface_area_* helpers being
    # defined earlier in this module under exactly these names.
    print('[DEMO] Areas of various geometric shapes: \n')
    print(f'''Rectangle: {area_rectangle(1_0, 2_0) = }''')
    print(f'''Square: {area_square(1_0) = }''')
    print(f'''Triangle: {area_triangle(1_0, 1_0) = }''')
    print(f'''Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }''')
    print(f'''Parallelogram: {area_parallelogram(1_0, 2_0) = }''')
    print(f'''Rhombus: {area_rhombus(1_0, 2_0) = }''')
    print(f'''Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }''')
    print(f'''Circle: {area_circle(2_0) = }''')
    print(f'''Ellipse: {area_ellipse(1_0, 2_0) = }''')
    print('\nSurface Areas of various geometric shapes: \n')
    print(f'''Cube: {surface_area_cube(2_0) = }''')
    print(f'''Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }''')
    print(f'''Sphere: {surface_area_sphere(2_0) = }''')
    print(f'''Hemisphere: {surface_area_hemisphere(2_0) = }''')
    print(f'''Cone: {surface_area_cone(1_0, 2_0) = }''')
    print(f'''Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }''')
    print(f'''Cylinder: {surface_area_cylinder(1_0, 2_0) = }''')
    print(f'''Torus: {surface_area_torus(2_0, 1_0) = }''')
    print(f'''Equilateral Triangle: {area_reg_polygon(3, 1_0) = }''')
    print(f'''Square: {area_reg_polygon(4, 1_0) = }''')
    print(f'''Reqular Pentagon: {area_reg_polygon(5, 1_0) = }''')
| 540 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    """Builds tiny Albert configs and random inputs for the Flax model tests.

    The constructor records the test hyper-parameters; the prepare_* methods
    produce (config, inputs) pairs in the shapes FlaxModelTesterMixin expects.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) with random ids."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the layout FlaxModelTesterMixin consumes."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the common Flax model-tester suite over every Albert head class."""

    # One entry per task head; the duplicated FlaxAlbertForQuestionAnswering
    # entry in the original tuple was removed.
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        # NOTE(review): FlaxAlbertModelTester must be the tester class defined
        # earlier in this module.
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test that every head class loads and runs from the hub checkpoint."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('albert-base-v2')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    """Integration test pinning the base model's hidden states to known values."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained('albert-base-v2')
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        # Reference slice computed once from the released checkpoint.
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A vector of real numbers backed by a plain Python list.

    Supports len(), str(), +, -, * (scalar multiplication and dot product),
    component access/mutation, Euclidean length and angle computation.
    """

    def __init__(self, components=None):
        """Store a copy of *components*; an empty vector when omitted."""
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self):
        """Return the number of components."""
        return len(self.__components)

    def __str__(self):
        """Render as ``(c1,c2,...)``."""
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other):
        """Component-wise sum; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception('must have the same size')

    def __sub__(self, other):
        """Component-wise difference; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception('must have the same size')

    def __mul__(self, other):
        """Scalar multiplication for numbers, dot product for equal-size vectors."""
        if isinstance(other, (float, int)):
            scaled = [c * other for c in self.__components]
            return Vector(scaled)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            products = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(products)
        else:  # error case
            raise Exception('invalid operand!')

    def copy(self):
        """Return an independent copy of this vector."""
        return Vector(self.__components)

    def component(self, i):
        """Return component *i* (negative indices allowed, list-style)."""
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception('index out of range')

    def change_component(self, pos, value):
        """Set component *pos* to *value* in place."""
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self):
        """Return the Euclidean (L2) norm; raises on the empty vector."""
        if len(self.__components) == 0:
            raise Exception('Vector is empty')
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other, deg=False):
        """Return the angle to *other*, in radians (degrees when *deg*)."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


# Backward-compatible alias for the previous (mangled) class name.
lowercase = Vector
def zero_vector(dimension):
    """Return the zero vector with *dimension* components."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector(dimension, pos):
    """Return the unit basis vector: zeros except a 1 at index *pos*."""
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy(scalar, x, y):
    """Return ``scalar * x + y`` (the classic AXPY operation)."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y
def random_vector(n, a, b):
    """Return a vector of *n* random integers drawn from [a, b]."""
    random.seed(None)
    components = [random.randint(a, b) for _ in range(n)]
    return Vector(components)
class Matrix:
    """A height x width matrix of numbers stored as a list of row lists.

    Supports str(), +, -, * (scalar and matrix-vector), component
    access/mutation, minors, cofactors and the determinant (Laplace
    expansion along the first row).
    """

    def __init__(self, matrix, w, h):
        """Store the row-list *matrix* with width *w* and height *h*."""
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self):
        """Render rows as ``|a,b,...|`` lines, one per row."""
        ans = ''
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other):
        """Component-wise sum; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception('matrix must have the same dimension!')

    def __sub__(self, other):
        """Component-wise difference; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception('matrices must have the same dimension!')

    def __mul__(self, other):
        """Scalar multiplication for numbers, matrix-vector product for vectors."""
        # Check the numeric case first so scalar multiplication never depends
        # on the Vector helpers being importable.
        if isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        elif isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    summands = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(summands))
                return ans
            else:
                raise Exception(
                    'vector must have the same size as the '
                    'number of columns of the matrix!' )
        return None

    def height(self):
        """Return the number of rows."""
        return self.__height

    def width(self):
        """Return the number of columns."""
        return self.__width

    def component(self, x, y):
        """Return the entry at row *x*, column *y*."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('change_component: indices out of bounds')

    def change_component(self, x, y, value):
        """Set the entry at row *x*, column *y* to *value* in place."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('change_component: indices out of bounds')

    def minor(self, x, y):
        """Return the (x, y) minor: the determinant with row x / column y removed."""
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            # Replace each remaining row by a copy with column y dropped;
            # the original rows are never mutated.
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x, y):
        """Return the signed minor ((-1)^(x+y) * minor)."""
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception('Indices out of bounds')

    def determinant(self):
        """Return the determinant via Laplace expansion along the first row."""
        if self.__height != self.__width:
            raise Exception('Matrix is not square')
        if self.__height < 1:
            raise Exception('Matrix has no element')
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


# Backward-compatible alias for the previous (mangled) class name.
lowercase = Matrix
def square_zero_matrix(n):
    """Return the n x n zero matrix."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def random_matrix(width, height, a, b):
    """Return a height x width matrix of random integers drawn from [a, b]."""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class A__(enum.Enum):
    """Output formats of the text-generation pipeline.

    The original block reused one member name three times, which makes
    ``enum.Enum`` raise at class creation; the members below are the ones
    the pipeline code actually references via ``ReturnType.*``.
    """

    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


# Name used throughout the pipeline implementation below.
ReturnType = A__
@add_end_docstrings(PIPELINE_INIT_ARGS)
class A__(Pipeline):
    """Text-generation pipeline: continues a text prompt with a causal LM.

    Works with any model registered in ``MODEL_FOR_CAUSAL_LM_MAPPING`` (or the
    TF equivalent). Output format is controlled by ``ReturnType``.
    """

    # Prompt prepended for XLNet / Transfo-XL, which need some context
    # ("state") before the user prompt to generate sensibly.
    XL_PREFIX = '''
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    '''

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        stop_sequence=None,
        prefix=None,
        handle_long_generation=None,
        **generate_kwargs,
    ):
        """Split user kwargs into (preprocess, forward, postprocess) parameter dicts."""
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework )
            # _forward later pops prefix_length to extend max/min length bounds.
            generate_kwargs["prefix_length"] = prefix_inputs['input_ids'].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']" )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_full_text`')
            if return_tensors is not None:
                raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`')
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_tensors`')
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
                    ' the stop sequence will be used as the stop sequence string in the interim.' )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        """Tokenize inputs, with the Transfo-XL-specific punctuation-spacing quirk."""
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'add_space_before_punct_symbol': True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        """Generate continuation(s) for one prompt or a batch of prompts."""
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        """Tokenize ``prefix + prompt``; optionally truncate from the left ("hole")."""
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs['input_ids'].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs['max_new_tokens']
            else:
                new_tokens = generate_kwargs.get('max_length', self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError('We cannot infer how many new tokens are expected')
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
                        ' models max length' )
                # Keep only the rightmost tokens that still fit.
                inputs["input_ids"] = inputs['input_ids'][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs['attention_mask'][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        """Run ``model.generate`` and reshape the output to (batch, num_return, seq)."""
        input_ids = model_inputs['input_ids']
        attention_mask = model_inputs.get('attention_mask', None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop('prompt_text')

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop('prefix_length', 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get('max_length') or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        """Decode generated sequences into the requested output format."""
        generated_sequence = model_outputs['generated_sequence'][0]
        input_ids = model_outputs['input_ids']
        prompt_text = model_outputs['prompt_text']
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {'generated_token_ids': sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        ) )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {'generated_text': all_text}
            records.append(record)

        return records
| 713 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Module logger; the original bound both globals below to the same mangled
# name, so the archive map silently clobbered the logger.
logger = logging.get_logger(__name__)

# Map of released VAN checkpoints to their hosted config files.
VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class A__(PretrainedConfig):
    r"""Configuration for a VAN (Visual Attention Network) model.

    The defaults reproduce the ``van-base`` architecture. All stage-wise
    settings (patch sizes, strides, hidden sizes, depths, MLP ratios) are
    lists with one entry per stage.
    """

    model_type = 'van'

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Public API of the X-MOD package, exposed lazily: the torch-backed modeling
# module is only imported when torch is available and an attribute is
# actually accessed (or eagerly under TYPE_CHECKING for static analyzers).
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy so heavy imports are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 77 |
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr, size, stride) -> np.ndarray:
    """Max-pool a square 2D array with a ``size`` x ``size`` window.

    Args:
        arr: square 2D array-like of numbers.
        size: side length of the (square) pooling window.
        stride: step between successive windows, in both directions.

    Returns:
        np.ndarray whose cells hold the maximum of each window.

    Raises:
        ValueError: if the input is not a square matrix.

    >>> maxpooling([[1, 2], [3, 4]], 2, 2).tolist()
    [[4.0]]
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # store the maximum of the pooling window (the original dropped
            # this result into a throwaway local, leaving the output all-zero)
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column indices to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply 2D average pooling to a square matrix.

    Args:
        arr: square input matrix (anything ``np.array`` accepts).
        size: side length of the (size x size) pooling window.
        stride: number of pixels the window shifts per step.

    Returns:
        Matrix of shape ``((n - size) // stride + 1,) * 2`` holding the
        truncated integer average of each window.

    Raises:
        ValueError: if the input is not a square matrix.
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling window (truncated toward zero)
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column indices to 0 for the next row of windows
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
# Demo driver: run the module doctests, then show the effect of max/average
# pooling on an image loaded from disk.
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="""avgpooling""", verbose=True)
    # Loading the image
    # NOTE(review): the opened image is bound to `A`, but the calls below read
    # `image`, which is never defined — one of the two names is wrong and this
    # script raises NameError as written. Confirm the intended name and unify.
    A = Image.open("""path_to_image""")
    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    # NOTE(review): `maxpooling`/`avgpooling` must exist at module level under
    # these names for the calls below to resolve.
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 77 | 1 |
'''simple docstring'''
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    """Reflect a light ray off the ellipse 4x^2 + y^2 = 100.

    Given the impact point and the gradient of the incoming ray, return the
    next impact point and the gradient of the outgoing ray as
    ``(next_x, next_y, outgoing_gradient)``.
    """
    # normal gradient at (x, y): implicit differentiation of 4x^2 + y^2 = 100
    # gives dy/dx = -4x/y, so the normal has gradient y / (4x)
    normal_gradient = point_y / 4 / point_x
    # sin(2*theta) and cos(2*theta) for the reflection rotation, expressed in
    # terms of the normal gradient m: s2 = 2m/(1+m^2), c2 = (1-m^2)/(1+m^2)
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point: pick the other one
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """Project Euler 144: count reflections of the beam inside the ellipse.

    The beam enters at (0, 10.1), first strikes (first_x_coord,
    first_y_coord), and bounces until it exits through the top gap
    -0.01 <= x <= 0.01 with y > 0.
    """
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    # gradient of the entry segment from (0.0, 10.1) to the first impact point
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
| 720 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def __UpperCamelCase() -> None:
    """Validate Prim's algorithm (imported as ``mst``) on a fixed 9-node graph.

    Builds the undirected adjacency list for a classic weighted example graph,
    runs the algorithm, and asserts that every edge of the known minimum
    spanning tree appears in the result in either orientation.
    """
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    # register every edge in both directions — the graph is undirected
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        # the MST may report either endpoint order for an edge
        assert edge in result or reverse in result
| 276 | 0 |
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Return all primes strictly below ``limit`` via an odd-only sieve.

    Note: requires ``limit >= 3`` (indices 0..2 are written unconditionally).
    """
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    # mark composite multiples of each odd candidate, starting at 2*i
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Project Euler 50: prime below ``ceiling`` writable as the longest
    sum of consecutive primes.
    """
    primes = prime_sieve(ceiling)
    prime_set = set(primes)  # O(1) membership instead of O(n) list scans
    length = 0
    largest = 0

    for i in range(len(primes)):
        # start at i + length: shorter chains can never beat the record
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
| 101 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence[start:end + 1]`` in place with the (deliberately
    inefficient) slowsort algorithm.

    Args:
        sequence: mutable sequence of comparable items; modified in place.
        start: first index of the range to sort (defaults to 0).
        end: last index, inclusive (defaults to ``len(sequence) - 1``).
    """
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    # empty or single-element range: nothing to do
    if start >= end:
        return

    mid = (start + end) // 2
    # recursively "sort" both halves, bubble the maximum to `end`,
    # then sort everything but the last element
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 101 | 1 |
"""simple docstring"""
def is_balanced(s: str) -> bool:
    """Return True if every bracket in ``s`` is properly matched and nested.

    Characters other than ``()[]{}`` are ignored; an empty string is balanced.
    """
    stack = []
    open_brackets = {"(", "[", "{"}
    closed_brackets = {")", "]", "}"}
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for ch in s:
        if ch in open_brackets:
            stack.append(ch)
        elif ch in closed_brackets and (not stack or open_to_closed[stack.pop()] != ch):
            # closing bracket with no opener, or mismatched opener
            return False

    # any leftover openers mean the string is unbalanced
    return not stack


def main() -> None:
    """Read a bracket sequence from stdin and report whether it is balanced."""
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
| 378 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class UpperCamelCase_(BaseImageProcessor):
    """Image processor with resize / center-crop / rescale / normalize steps.

    Fixes applied: the base class was an undefined name (`a_`) — the imported
    `BaseImageProcessor` is the only base that matches this API; duplicate
    mangled parameter names (a SyntaxError) are restored from the attribute
    assignments in each body; the helper methods are named `resize`,
    `center_crop`, `rescale`, `normalize`, `preprocess` as `preprocess`
    itself calls them.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        """Store the default preprocessing configuration."""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image``; when ``crop_pct`` is given the target is scaled up
        by ``1 / crop_pct`` first (crop-and-resize convention)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop ``image`` to ``size['height'] x size['width']``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` channel-wise with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the full pipeline on one image or a batch; per-call arguments
        override the defaults stored at construction time."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # NOTE: kept as in the original — `and` binds tighter than `or`, so this
        # reads (do_resize and size is None) or resample is None.
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 378 | 1 |
'''simple docstring'''
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of a non-negative integer."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Project Euler 65: digit sum of the numerator of the ``max_n``-th
    convergent of the continued fraction of e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...].
    """
    # numerators of the first two convergents: 2/1 and 3/1
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        # the continued-fraction term: 2k at positions 3k, otherwise 1
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
| 71 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCamelCase = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_lowerCamelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_lowerCamelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _snake_case(datasets.Metric):
    """Google BLEU (GLEU) metric backed by nltk's ``gleu_score.corpus_gleu``.

    Fix: the two hooks were both mangled to the same name (the second def
    silently shadowed the first); ``datasets.Metric`` dispatches to ``_info``
    and ``_compute``, so those names are restored.
    """

    def _info(self):
        # Declares the metric metadata and the tokenized input schema.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        # corpus_gleu aggregates match counts over the whole corpus rather than
        # averaging per-sentence scores.
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 71 | 1 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
lowerCAmelCase_ = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    """Builds a tiny random Pegasus config with matching encoder/decoder inputs
    and provides the cache-consistency checks used by the Flax Pegasus tests.

    Fix: the class is named ``FlaxPegasusModelTester`` because the test class
    below instantiates it under that name; ``config_cls``/``config_updates``
    are restored because the methods read ``self.config_cls`` and
    ``**self.config_updates``.
    """

    config_cls = PegasusConfig
    # extra kwargs merged into the config built in prepare_config_and_inputs_for_common
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        """Return a (config, inputs_dict) pair with random ids ending in EOS."""
        # clip to 3+ so no special token (pad/bos/eos) appears mid-sequence
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Cached step-by-step decoding must match a full uncached forward pass."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as above, but carrying the real attention mask through the cache."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # pad the mask out to the full cache length with zeros
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    """Assemble the model input dict, deriving masks from padding when absent.

    The encoder mask is 1 where ``input_ids`` differs from the pad token.
    The decoder mask always attends to the first (decoder-start) position and
    masks padding in the remaining positions.
    """
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class _snake_case ( __snake_case , unittest.TestCase ):
"""simple docstring"""
a = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
a = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
a = True
a = False
a = False
a = False
def _lowerCAmelCase ( self : Optional[int]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = FlaxPegasusModelTester(self)
_SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=_A)
def _lowerCAmelCase ( self : List[str]):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_A , _A , _A)
def _lowerCAmelCase ( self : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_A , _A , _A)
def _lowerCAmelCase ( self : int):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
_SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(_A , _A)
_SCREAMING_SNAKE_CASE : str = model_class(_A)
@jax.jit
def encode_jitted(_A : str , _A : Tuple=None , **_A : Any):
return model.encode(input_ids=_A , attention_mask=_A)
with self.subTest("""JIT Enabled"""):
_SCREAMING_SNAKE_CASE : Any = encode_jitted(**_A).to_tuple()
with self.subTest("""JIT Disabled"""):
with jax.disable_jit():
_SCREAMING_SNAKE_CASE : List[Any] = encode_jitted(**_A).to_tuple()
self.assertEqual(len(_A) , len(_A))
for jitted_output, output in zip(_A , _A):
self.assertEqual(jitted_output.shape , output.shape)
def _lowerCAmelCase ( self : Dict):
    """JIT-compile ``model.decode`` and check jitted vs. eager outputs agree in shape.

    Fixed from the mangled original, which referenced the undefined name ``_A``
    throughout; argument names restored from the call sites.
    """
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        with self.subTest(model_class.__name__):
            model = model_class(config)
            encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

            prepared_inputs_dict = {
                "decoder_input_ids": inputs_dict["decoder_input_ids"],
                "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                "encoder_outputs": encoder_outputs,
            }

            @jax.jit
            def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                return model.decode(
                    decoder_input_ids=decoder_input_ids,
                    decoder_attention_mask=decoder_attention_mask,
                    encoder_outputs=encoder_outputs,
                )

            with self.subTest("JIT Enabled"):
                jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

            with self.subTest("JIT Disabled"):
                with jax.disable_jit():
                    outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

            self.assertEqual(len(jitted_outputs), len(outputs))
            for jitted_output, output in zip(jitted_outputs, outputs):
                self.assertEqual(jitted_output.shape, output.shape)
@slow
def _lowerCAmelCase ( self : Dict):
    """Smoke-test that every model class loads google/pegasus-large from PyTorch weights.

    Fixed from the mangled original, which passed the undefined name ``_A`` as
    ``from_pt`` — presumably ``True``, since the checkpoint is a PyTorch one.
    """
    for model_class_name in self.all_model_classes:
        model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
        input_ids = np.ones((1, 1))
        outputs = model(input_ids)
        self.assertIsNotNone(outputs)
@slow
def _lowerCAmelCase ( self : List[Any]):
    """End-to-end batch summarisation check against pegasus-xsum reference outputs.

    Fixed from the mangled original, which passed the undefined name ``_A`` for
    ``truncation``, ``padding`` and ``skip_special_tokens`` (all ``True``).
    """
    model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
    tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
    src_text = [
        """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
        """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
    ]

    tgt_text = [
        """California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
        """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
    ]

    inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
    translated_tokens = model.generate(**inputs, num_beams=2).sequences
    decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
    assert tgt_text == decoded
| 712 | """simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    """Convert a TensorFlow TAPAS checkpoint to PyTorch and save model + tokenizer.

    Reconstructed from the mangled original, whose five parameters all shared one
    name (a SyntaxError) and whose per-task hyper-parameters were assigned to a
    throwaway local instead of the config. Attribute names restored from the
    upstream conversion script — verify against it.

    Args:
        task: One of SQA / WTQ / WIKISQL_SUPERVISED / TABFACT / MLM / INTERMEDIATE_PRETRAINING.
        reset_position_index_per_cell: Use relative (per-cell) position embeddings.
        tf_checkpoint_path: Path to the TF checkpoint (expected to end in "model.ckpt").
        tapas_config_file: JSON config describing the architecture.
        pytorch_dump_path: Output directory for the PyTorch model and tokenizer.
    """
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"""Task {task} not supported.""")

    print(f"""Building PyTorch model from configuration: {config}""")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"""Save tokenizer files to {pytorch_dump_path}""")
    # NOTE(review): assumes the checkpoint path ends with "model.ckpt" (10 chars) — confirm.
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


# Backward-compatible alias for the mangled original name.
lowerCamelCase_ = convert_tf_checkpoint_to_pytorch
if __name__ == "__main__":
    # Fixed from the mangled original, which bound the parser and the parsed args
    # to `lowerCAmelCase_` but then used the undefined names `parser` and `args`.
    parser = argparse.ArgumentParser()
    lowerCAmelCase_ = parser  # keep the original mangled binding alive
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    lowerCAmelCase_ = args
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
| 635 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both bindings below share the name `__lowerCamelCase`, so the
# logger is immediately clobbered by the checkpoint map (obfuscation artifact).
__lowerCamelCase = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config URL.
__lowerCamelCase = {
    'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class __A ( SCREAMING_SNAKE_CASE_ ):
    """EfficientNet model configuration (reconstructed; see NOTE).

    NOTE(review): the original reused one name for every ``__init__`` parameter
    (a SyntaxError) and bound each value to a throwaway local instead of
    ``self``; parameter names restored from the assignment order — verify
    against the upstream ``EfficientNetConfig``.
    """

    UpperCAmelCase__ = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        # NOTE: mutable list defaults mirror the upstream config; assumed never mutated.
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # Each block repeat contributes 4 hidden layers.
        self.num_hidden_layers = sum(num_block_repeats) * 4
class __A ( SCREAMING_SNAKE_CASE_ ):
    """ONNX export config: one 4-D NCHW image input, fp32 validation tolerance 1e-5.

    NOTE(review): this class reuses the module-level name ``__A`` and both
    properties share one mangled name (the second shadows the first), exactly
    as in the original — obfuscation artifacts left intact.
    """

    UpperCAmelCase__ = version.parse("1.11")

    @property
    def lowerCamelCase__ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
        # Single image tensor with dynamic batch axis.
        axis_names = {0: "batch", 1: "num_channels", 2: "height", 3: "width"}
        return OrderedDict([("pixel_values", axis_names)])

    @property
    def lowerCamelCase__ ( self : List[Any] ) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1E-5
| 96 |
import math


def A ( _lowerCamelCase ):
    """Sieve of Eratosthenes: return all primes p with 2 <= p < ``_lowerCamelCase``.

    Assumes ``_lowerCamelCase`` >= 3 (as the original did). Improved to start
    marking at i*i and to skip already-composite bases; output is unchanged.
    """
    is_prime = [True] * _lowerCamelCase
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(_lowerCamelCase**0.5 + 1), 2):
        if not is_prime[i]:
            continue
        for multiple in range(i * i, _lowerCamelCase, i):
            is_prime[multiple] = False

    primes = [2]
    for i in range(3, _lowerCamelCase, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


# The solution below calls `prime_sieve`, which the mangled original never defined.
prime_sieve = A
def A ( _lowerCamelCase = 999_966_663_333 ):
    """Project Euler: sum all "semidivisible" numbers up to the given limit.

    For each pair of consecutive primes (lps, ups) bracketing sqrt(n), sums the
    numbers divisible by exactly one of them. Local names restored from the
    mangled original (which bound every value to one throwaway name).
    """
    primes_upper_bound = math.floor(math.sqrt(_lowerCamelCase)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= _lowerCamelCase:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= _lowerCamelCase:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > _lowerCamelCase:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= _lowerCamelCase:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > _lowerCamelCase:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


# The __main__ guard below calls `solution`, which the mangled original never defined.
solution = A

if __name__ == "__main__":
    print(solution())
| 500 | 0 |
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    """Operation tuple: read ``d[k]``."""
    return getitem, k


def _set(k, v):
    """Operation tuple: ``d[k] = v``."""
    return setitem, k, v


def _del(k):
    """Operation tuple: ``del d[k]``."""
    return delitem, k


def _run_operation(obj, fun, *args):
    """Apply ``fun(obj, *args)``; return (result, None) or (None, exception)."""
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


# The mangled original bound all four helpers to this single name (last
# definition wins); keep that binding for backward compatibility. The rest of
# the module refers to `_get` / `_set` / `_del` / `_run_operation` above.
_SCREAMING_SNAKE_CASE = _run_operation
# Operation scripts consumed by the parametrized test below. The mangled
# original bound every list to the single name `_UpperCamelCase`, leaving the
# names the parametrize decorator actually uses undefined.
_add_items = (
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
)

_overwrite_items = [
    _set('key_a', 'val_a'),
    _set('key_a', 'val_b'),
]

_delete_items = [
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
    _del('key_a'),
    _del('key_b'),
    _set('key_a', 'val_a'),
    _del('key_a'),
]

_access_absent_items = [
    _get('key_a'),
    _del('key_a'),
    _set('key_a', 'val_a'),
    _del('key_a'),
    _del('key_a'),
    _get('key_a'),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('key_a', 'val_b'),
]

# Backward-compatible binding (in the original, the last assignment won).
_UpperCamelCase = _add_with_resize_down
@pytest.mark.parametrize(
    'operations' , (
        pytest.param(_add_items , id='add items' ),
        pytest.param(_overwrite_items , id='overwrite items' ),
        pytest.param(_delete_items , id='delete items' ),
        pytest.param(_access_absent_items , id='access absent items' ),
        pytest.param(_add_with_resize_up , id='add with resize up' ),
        pytest.param(_add_with_resize_down , id='add with resize down' ),
    ) , )
def _SCREAMING_SNAKE_CASE ( __snake_case : List[str] ):
    """Replay an operation script against HashMap and dict; they must stay in lockstep."""
    # NOTE(review): both fixtures below are bound to the same name `lowercase`
    # (the dict clobbers the HashMap), and the two unpacking assignments each
    # reuse `lowercase , lowercase`, so `my_res` / `py_res` / `my` / `py` used
    # afterwards are undefined — the obfuscation destroyed the original
    # `my = HashMap(...)` / `py = {}` / `my_res, my_exc = ...` structure; verify
    # against the upstream test before relying on this function.
    lowercase = HashMap(initial_block_size=4 )
    lowercase = {}
    for _, (fun, *args) in enumerate(__snake_case ):
        lowercase , lowercase = _run_operation(__snake_case , __snake_case , *__snake_case )
        lowercase , lowercase = _run_operation(__snake_case , __snake_case , *__snake_case )
        assert my_res == py_res
        assert str(__snake_case ) == str(__snake_case )
        assert set(__snake_case ) == set(__snake_case )
        assert len(__snake_case ) == len(__snake_case )
        assert set(my.items() ) == set(py.items() )
def _SCREAMING_SNAKE_CASE ( ):
    """Assert dict's public API is a strict superset of HashMap's.

    Fixed from the mangled original, where ``is_public`` named its parameter
    ``__snake_case`` but its body read the undefined name ``name``.
    """

    def is_public(name: str) -> bool:
        # Public == not underscore-prefixed.
        return not name.startswith('_' )

    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}

    assert dict_public_names > hash_public_names
| 134 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both bindings below share the name `_UpperCamelCase`, so the
# logger is immediately clobbered by the checkpoint map (obfuscation artifact).
_UpperCamelCase : List[str] = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL (MobileNetV2 family).
_UpperCamelCase : Union[str, Any] = {
    'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
    'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
    'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
    'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class a ( a_ ):
    """MobileNetV2 model configuration (reconstructed; see NOTE).

    NOTE(review): the original reused one name for every ``__init__`` parameter
    (a SyntaxError) and bound each value to a throwaway local instead of
    ``self``; parameter names restored from the assignment order — verify
    against the upstream ``MobileNetV2Config``.
    """

    UpperCAmelCase_ : Optional[Any] = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.' )

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
# NOTE(review): this class reuses the module-level name `a` (shadowing the
# config class above), and its three properties all share the name
# `UpperCamelCase_`, so only the last (the 1e-4 tolerance) survives on the
# class — obfuscation artifacts; the originals were presumably `inputs`,
# `outputs` and `atol_for_validation`.
class a ( a_ ):
    UpperCAmelCase_ : Dict =version.parse("1.11" )

    @property
    def UpperCamelCase_ ( self ):
        # ONNX graph inputs: one image tensor with a dynamic batch axis.
        return OrderedDict([('pixel_values', {0: 'batch'})] )

    @property
    def UpperCamelCase_ ( self ):
        # ONNX graph outputs depend on the exported task head.
        if self.task == "image-classification":
            return OrderedDict([('logits', {0: 'batch'})] )
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )

    @property
    def UpperCamelCase_ ( self ):
        # Absolute tolerance used when validating the exported model.
        return 1e-4
| 134 | 1 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class UpperCamelCase( ctypes.Structure ):
# _fields is a specific attr expected by ctypes
snake_case_ : Any = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def _lowerCAmelCase ( ) -> None:
    """Hide the terminal cursor (Win32 console API on Windows, ANSI escape elsewhere).

    Fixed from the mangled original: `kernelaa` -> `kernel32`, the undefined
    `CursorInfo()` -> the struct class defined above, and the visibility flag is
    written to the struct instead of a throwaway local.
    """
    if os.name == "nt":
        cursor_info = UpperCamelCase()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)  # -11 == STD_OUTPUT_HANDLE
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
        cursor_info.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


# Readable alias; the context manager below refers to `hide_cursor`.
hide_cursor = _lowerCAmelCase
def _lowerCAmelCase ( ) -> None:
    """Show the terminal cursor (Win32 console API on Windows, ANSI escape elsewhere).

    Fixed from the mangled original: `kernelaa` -> `kernel32`, the undefined
    `CursorInfo()` -> the struct class defined above, and the visibility flag is
    written to the struct instead of a throwaway local.
    """
    if os.name == "nt":
        cursor_info = UpperCamelCase()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)  # -11 == STD_OUTPUT_HANDLE
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
        cursor_info.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(cursor_info))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


# Readable alias; the context manager below refers to `show_cursor`.
show_cursor = _lowerCAmelCase
@contextmanager
def _lowerCAmelCase ( ) -> Optional[int]:
    """Context manager that hides the terminal cursor for the duration of the block.

    NOTE(review): `hide_cursor` / `show_cursor` are not defined under those
    names in this module as written — presumably the two mangled helpers
    above; verify before use. The cursor is always restored via `finally`.
    """
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
| 371 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
# The mangled original bound every constant below to the single name `A`
# (last assignment wins), while the tokenizer class references the real names;
# restored here, with `A` kept as the final compatibility binding.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4

# Backward-compatible binding (in the original, the last assignment won).
A = SEG_ID_PAD
class UpperCamelCase( PreTrainedTokenizer ):
    """SentencePiece-based XLNet tokenizer: pads on the left and appends
    ``<sep> <cls>`` to each sequence.

    NOTE(review): reconstructed from the mangled original, which used the
    undefined base name ``_a``, gave every ``__init__`` parameter one shared
    name (a SyntaxError), and gave every method the same colliding name.
    Identifiers were restored from the method bodies — verify against the
    upstream XLNet tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # The mask token behaves like a normal word: strip the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """Size of the SentencePiece vocabulary."""
        return len(self.sp_model)

    def get_vocab(self):
        """Token -> id map covering the SP vocab plus any added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalise raw text before SentencePiece (whitespace, quotes, accents, case)."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", "\"").replace("''", "\"")

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        """Tokenise into SentencePiece pieces, re-splitting digit+comma pieces."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Token (str) -> vocabulary id."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Vocabulary id -> token (str)."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join sub-tokens back into a single detokenised string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """XLNet sequence format: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 on special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for sequence A, 1 for sequence B, 2 for the trailing ``<cls>``."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the SentencePiece model file into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 371 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a__ = {
    '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}

# Readable alias: the mangled original rebound `a__` to each backend's class
# list (destroying the structure) and then passed the undefined name
# `_import_structure` to _LazyModule.
_import_structure = a__

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_vision_encoder_decoder'''] = ['''VisionEncoderDecoderModel''']

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_vision_encoder_decoder'''] = ['''TFVisionEncoderDecoderModel''']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_flax_vision_encoder_decoder'''] = ['''FlaxVisionEncoderDecoderModel''']

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = a__
| 578 |
import re
from filelock import FileLock
try:
    import nltk

    a__ = True
except (ImportError, ModuleNotFoundError):
    a__ = False

# The code below (and the sentence-splitting helper) checks this name, which
# the mangled original never defined — only `a__` was bound.
NLTK_AVAILABLE = a__

if NLTK_AVAILABLE:
    # Serialise the one-time punkt download across concurrent workers.
    with FileLock('''.lock''') as lock:
        nltk.download('''punkt''', quiet=True)
def __UpperCAmelCase ( __a : str ) -> str:
    """Split text into sentences (NLTK punkt), one per line, after stripping
    Pegasus ``<n>`` newline markers.

    Fixed from the original, which discarded the ``re.sub`` result —
    ``re.sub`` returns a new string and never mutates its argument.
    """
    __a = re.sub('''<n>''', '''''', __a)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(__a))
| 578 | 1 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _UpperCAmelCase(Trainer):
    """A ``Trainer`` specialized for question answering.

    It defers metric computation to after a post-processing step that maps raw
    start/end logits back to answer strings.

    Bug fixes vs. the mangled original: the base class was the undefined name
    ``lowerCAmelCase`` (``Trainer`` is what is imported and used), every method
    signature repeated the parameter name ``lowercase_`` (a duplicate-argument
    SyntaxError), and both public methods shared one mangled name so the second
    clobbered the first. The bodies already read the canonical names restored here.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Raw (un-tokenized) eval examples, needed to map predictions back to answers.
        self.eval_examples = eval_examples
        # Callable(examples, features, predictions[, stage]) -> metric-ready predictions.
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """Run the evaluation loop, post-process predictions, and return metrics."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            # Always restore the metric function, even if the loop raised.
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """Run prediction, post-process, and return a ``PredictionOutput`` with metrics."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 547 | from __future__ import annotations
def lowerCAmelCase__(
    ciphertext,
    cipher_alphabet=None,
    frequencies_dict=None,
    case_sensitive=False,
) -> tuple[int, float, str]:
    """Break a Caesar cipher with a chi-squared letter-frequency test.

    Tries every shift of the alphabet, scores each candidate decryption against
    English (or user-supplied) letter frequencies, and returns the best one.

    Bug fixes vs. the mangled original: all four parameters were named ``a__``
    (a duplicate-argument SyntaxError) while the body already read the canonical
    names restored here, and the nested sort-key function took ``a__`` but
    indexed with the undefined name ``key``.

    Args:
        ciphertext: the encrypted message.
        cipher_alphabet: ordered list of alphabet characters; defaults to a-z.
        frequencies_dict: letter -> expected relative frequency; defaults to English.
        case_sensitive: if True, preserve the case of letters when decrypting.

    Returns:
        (most_likely_shift, its_chi_squared_value, decoded_message).
    """
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values, keyed by shift: (statistic, decrypted text)
    chi_squared_statistic_values = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(alphabet_letters)
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the excepcted amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the excepcted amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
| 547 | 1 |
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def snake_case__(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    """Merge LoRA weights from a ``.safetensors`` checkpoint into a diffusers pipeline.

    Bug fixes vs. the mangled original: all five parameters were named
    ``__lowerCamelCase`` (a duplicate-argument SyntaxError), the body read the
    undefined globals ``LORA_PREFIX_UNET``/``LORA_PREFIX_TEXT_ENCODER`` instead
    of the prefixes passed in, and ``torch.floataa`` does not exist
    (``torch.float32`` is the intended dtype).

    Args:
        base_model_path: diffusers-format base model to load.
        checkpoint_path: path to the LoRA ``.safetensors`` file.
        lora_prefix_unet: key prefix used for UNet weights in the checkpoint.
        lora_prefix_text_encoder: key prefix used for text-encoder weights.
        alpha: merging ratio, W = W0 + alpha * deltaW.

    Returns:
        The pipeline with LoRA deltas added into its weights in place.
    """
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer: module names may themselves contain "_", so on an
        # attribute miss we glue the next segment on and retry.
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        # Pair up (lora_up, lora_down) keys, up first.
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            # conv weights: collapse the trailing 1x1 dims, matmul, then restore them
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline


# Alias matching the public name the __main__ block of this script calls.
convert = snake_case__
if __name__ == "__main__":
    # Bug fix: the mangled original assigned every value to `_lowercase`, so
    # `parser`, the argument locals, and `pipe` (all read below) were undefined.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    # `snake_case__` is the converter defined above in this file.
    pipe = snake_case__(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 702 |
"""simple docstring"""
_lowercase : Optional[Any] = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
| 625 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase :
    """Builds a tiny LayoutLMv3 config plus dummy multimodal inputs for the tests below.

    NOTE(review): this file's identifiers were mechanically mangled. Every
    parameter of ``__init__`` and of the ``create_and_check_*`` methods is
    literally named ``_UpperCamelCase`` (a duplicate-argument SyntaxError), all
    locals are ``lowerCAmelCase_``, and bodies read the *original* names
    (``parent``, ``batch_size``, ``config``, ``result``, ...). The assignment
    order below documents the intended parameter names; restore them before use.
    """

    def __init__( self , _UpperCamelCase , _UpperCamelCase=2 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=2 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=36 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=6 , _UpperCamelCase=6 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=None , _UpperCamelCase=1_000 , ) -> Optional[Any]:
        # Intended: self.parent = parent, self.batch_size = batch_size, ... in order.
        lowerCAmelCase_ = parent
        lowerCAmelCase_ = batch_size
        lowerCAmelCase_ = num_channels
        lowerCAmelCase_ = image_size
        lowerCAmelCase_ = patch_size
        lowerCAmelCase_ = text_seq_length
        lowerCAmelCase_ = is_training
        lowerCAmelCase_ = use_input_mask
        lowerCAmelCase_ = use_token_type_ids
        lowerCAmelCase_ = use_labels
        lowerCAmelCase_ = vocab_size
        lowerCAmelCase_ = hidden_size
        lowerCAmelCase_ = num_hidden_layers
        lowerCAmelCase_ = num_attention_heads
        lowerCAmelCase_ = intermediate_size
        lowerCAmelCase_ = hidden_act
        lowerCAmelCase_ = hidden_dropout_prob
        lowerCAmelCase_ = attention_probs_dropout_prob
        lowerCAmelCase_ = max_position_embeddings
        lowerCAmelCase_ = type_vocab_size
        lowerCAmelCase_ = type_sequence_label_size
        lowerCAmelCase_ = initializer_range
        lowerCAmelCase_ = coordinate_size
        lowerCAmelCase_ = shape_size
        lowerCAmelCase_ = num_labels
        lowerCAmelCase_ = num_choices
        lowerCAmelCase_ = scope
        lowerCAmelCase_ = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        lowerCAmelCase_ = text_seq_length
        lowerCAmelCase_ = (image_size // patch_size) ** 2 + 1
        lowerCAmelCase_ = self.text_seq_length + self.image_seq_length

    # Intended name: prepare_config_and_inputs.
    def __a ( self ) -> Dict:
        """Return (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels)."""
        lowerCAmelCase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )

        lowerCAmelCase_ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                # Swap coordinates so x1 <= x2 and y1 <= y2 for every box.
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    lowerCAmelCase_ = bbox[i, j, 3]
                    lowerCAmelCase_ = bbox[i, j, 1]
                    lowerCAmelCase_ = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    lowerCAmelCase_ = bbox[i, j, 2]
                    lowerCAmelCase_ = bbox[i, j, 0]
                    lowerCAmelCase_ = t

        lowerCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        lowerCAmelCase_ = None
        if self.use_input_mask:
            lowerCAmelCase_ = random_attention_mask([self.batch_size, self.text_seq_length] )

        lowerCAmelCase_ = None
        if self.use_token_type_ids:
            lowerCAmelCase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )

        lowerCAmelCase_ = None
        lowerCAmelCase_ = None
        if self.use_labels:
            lowerCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCAmelCase_ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )

        lowerCAmelCase_ = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    # Intended name: create_and_check_model(config, input_ids, bbox, pixel_values,
    # token_type_ids, input_mask, sequence_labels, token_labels).
    def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
        lowerCAmelCase_ = LayoutLMvaModel(config=_UpperCamelCase )
        model.to(_UpperCamelCase )
        model.eval()

        # text + image
        lowerCAmelCase_ = model(_UpperCamelCase , pixel_values=_UpperCamelCase )
        lowerCAmelCase_ = model(
            _UpperCamelCase , bbox=_UpperCamelCase , pixel_values=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
        lowerCAmelCase_ = model(_UpperCamelCase , bbox=_UpperCamelCase , pixel_values=_UpperCamelCase , token_type_ids=_UpperCamelCase )
        lowerCAmelCase_ = model(_UpperCamelCase , bbox=_UpperCamelCase , pixel_values=_UpperCamelCase )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

        # text only
        lowerCAmelCase_ = model(_UpperCamelCase )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )

        # image only
        lowerCAmelCase_ = model(pixel_values=_UpperCamelCase )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    # Intended name: create_and_check_for_sequence_classification.
    def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Any:
        lowerCAmelCase_ = self.num_labels
        lowerCAmelCase_ = LayoutLMvaForSequenceClassification(_UpperCamelCase )
        model.to(_UpperCamelCase )
        model.eval()
        lowerCAmelCase_ = model(
            _UpperCamelCase , bbox=_UpperCamelCase , pixel_values=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # Intended name: create_and_check_for_token_classification.
    def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
        lowerCAmelCase_ = self.num_labels
        lowerCAmelCase_ = LayoutLMvaForTokenClassification(config=_UpperCamelCase )
        model.to(_UpperCamelCase )
        model.eval()
        lowerCAmelCase_ = model(
            _UpperCamelCase , bbox=_UpperCamelCase , pixel_values=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    # Intended name: create_and_check_for_question_answering.
    def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]:
        lowerCAmelCase_ = LayoutLMvaForQuestionAnswering(config=_UpperCamelCase )
        model.to(_UpperCamelCase )
        model.eval()
        lowerCAmelCase_ = model(
            _UpperCamelCase , bbox=_UpperCamelCase , pixel_values=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    # Intended name: prepare_config_and_inputs_for_common — repacks the tuple into
    # the kwargs dict that the common ModelTesterMixin machinery expects.
    def __a ( self ) -> Optional[Any]:
        lowerCAmelCase_ = self.prepare_config_and_inputs()
        (
            (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) , (
                lowerCAmelCase_
            ) ,
        ) = config_and_inputs
        lowerCAmelCase_ = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
# NOTE(review): bases are the undefined/duplicated name `__a` — the upstream
# pattern is (ModelTesterMixin, PipelineTesterMixin, unittest.TestCase), both of
# which are imported above; restore before running. Likewise the three
# `_lowercase = False` flags shadow each other (upstream: test_pruning,
# test_torchscript, test_mismatched_shapes) as do the two `_lowercase = (...)`
# assignments (all_model_classes, pipeline_model_mapping).
class _lowerCAmelCase ( __a , __a , unittest.TestCase ):
    """Common-suite tests for the LayoutLMv3 model family."""

    _lowercase =False
    _lowercase =False
    _lowercase =False
    _lowercase =(
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    _lowercase =(
        {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    # Intended name: is_pipeline_test_to_skip — always skips (see comment below).
    def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[str]:
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True

    # Intended name: setUp — builds the model tester and a config tester.
    def __a ( self ) -> Any:
        lowerCAmelCase_ = LayoutLMvaModelTester(self )
        lowerCAmelCase_ = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )

    # Intended name: _prepare_for_class(inputs_dict, model_class, return_labels=False) —
    # adapts shared dummy inputs (and adds labels) per model head.
    def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ) -> List[Any]:
        lowerCAmelCase_ = copy.deepcopy(_UpperCamelCase )
        if model_class in get_values(_UpperCamelCase ):
            # Multiple-choice models expect an extra num_choices dimension.
            lowerCAmelCase_ = {
                k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
                if isinstance(_UpperCamelCase , torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(_UpperCamelCase ):
                lowerCAmelCase_ = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=_UpperCamelCase )
            elif model_class in get_values(_UpperCamelCase ):
                # QA heads need start/end position labels.
                lowerCAmelCase_ = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_UpperCamelCase )
                lowerCAmelCase_ = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_UpperCamelCase )
            elif model_class in [
                *get_values(_UpperCamelCase ),
            ]:
                lowerCAmelCase_ = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_UpperCamelCase )
            elif model_class in [
                *get_values(_UpperCamelCase ),
            ]:
                # Token-classification labels: one label per text token.
                lowerCAmelCase_ = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=_UpperCamelCase , )

        return inputs_dict

    # Intended name: test_config.
    def __a ( self ) -> Tuple:
        self.config_tester.run_common_tests()

    # Intended name: test_model.
    def __a ( self ) -> Dict:
        lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCamelCase )

    # Intended name: test_model_various_embeddings — re-runs the model test for
    # every supported position-embedding type.
    def __a ( self ) -> Tuple:
        lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowerCAmelCase_ = type
            self.model_tester.create_and_check_model(*_UpperCamelCase )

    # Intended name: test_for_sequence_classification.
    def __a ( self ) -> Union[str, Any]:
        lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_UpperCamelCase )

    # Intended name: test_for_token_classification.
    def __a ( self ) -> Dict:
        lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )

    # Intended name: test_for_question_answering.
    def __a ( self ) -> List[str]:
        lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )

    # Intended name: test_model_from_pretrained (network-dependent, hence @slow).
    @slow
    def __a ( self ) -> Union[str, Any]:
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase_ = LayoutLMvaModel.from_pretrained(_UpperCamelCase )
            self.assertIsNotNone(_UpperCamelCase )
def lowerCamelCase__():
    """Load the standard COCO cats fixture image used by the integration test.

    Bug fix: the original assigned the opened image to a junk local but
    returned the undefined name ``image``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


# Alias matching the name the integration test below actually calls.
prepare_img = lowerCamelCase__
@require_torch
class _lowerCAmelCase(unittest.TestCase):
    """Integration test running the real ``microsoft/layoutlmv3-base`` checkpoint.

    Bug fixes vs. the mangled original: the property was named ``__a`` while the
    test body reads ``self.default_image_processor``; the test locals were
    mangled while the body reads ``outputs``/``prepare_img``; and the
    ``apply_ocr`` argument was the undefined name ``_UpperCamelCase``.
    """

    @cached_property
    def default_image_processor(self):
        # apply_ocr=False: the test supplies its own token ids and boxes.
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        """Forward a tiny input through the pretrained base model and check a hidden-state slice."""
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 290 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: submodule name -> names it exports.
# Bug fix: the original assigned this mapping (and the modeling list) to the
# junk name `_A`, leaving `_import_structure` — which _LazyModule below
# receives — undefined.
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers get direct imports.
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so the heavy torch-backed imports
    # only happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
# Bug fix: both search functions below read `precision`, but the constant was
# assigned to the junk name `__lowerCAmelCase`.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linearly scan ``array`` over the half-open index range [left, right).

    Returns the index of ``target``, or -1 if it is absent from the range.

    Bug fix: this def (like its two siblings) was mangled to the shared name
    ``UpperCAmelCase_`` with duplicate ``__a`` parameters, while the call sites
    in this file use ``lin_search``.
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


# Preserve the previous machine-generated binding for backward compatibility.
UpperCAmelCase_ = lin_search
def ite_ternary_search(array: list[int], target: int, precision: int = 10) -> int:
    """Iterative ternary search for ``target`` in the sorted list ``array``.

    Falls back to ``lin_search`` once the interval is narrower than
    ``precision`` (generalized here from the module constant into a
    backward-compatible keyword argument). Returns the index of ``target``
    or -1 if not found.

    Bug fix: the def was mangled to the shared name ``UpperCAmelCase_`` while
    the ``__main__`` block calls ``ite_ternary_search``.
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            # Small interval: a linear scan is cheaper than further splitting.
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            # Target lies strictly between the two probe points.
            left = one_third + 1
            right = two_third - 1
    # Loop exhausted without finding the target.
    return -1


# Preserve the previous machine-generated binding for backward compatibility.
UpperCAmelCase_ = ite_ternary_search
def rec_ternary_search(left: int, right: int, array: list[int], target: int, precision: int = 10) -> int:
    """Recursive ternary search for ``target`` in ``array`` between ``left`` and ``right``.

    Falls back to ``lin_search`` once the interval is narrower than
    ``precision`` (generalized here from the module constant into a
    backward-compatible keyword argument). Returns the index of ``target``
    or -1 if not found.

    Bug fix: the def was mangled to the shared name ``UpperCAmelCase_`` while
    the recursive calls and the ``__main__`` block use ``rec_ternary_search``.
    """
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target, precision)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target, precision)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target, precision)
    else:
        return -1


# Preserve the previous machine-generated binding for backward compatibility.
UpperCAmelCase_ = rec_ternary_search
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Bug fix: the mangled original assigned every value to `__lowerCAmelCase`
    # / `resulta`, leaving `user_input`, `collection`, `target` and the result
    # variables (all read below) undefined.
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    collection = [int(item.strip()) for item in user_input.split(""",""")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result2 != -1:
        print(f'''Iterative search: {target} found at positions: {result1}''')
        print(f'''Recursive search: {target} found at positions: {result2}''')
    else:
        print("""Not found""")
| 319 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
    """Integration tests for ``MgpstrProcessor`` (char tokenizer + ViT image processor).

    NOTE(review): in the generated source every method was named ``__lowercase``,
    so all but the last were shadowed and unittest discovered nothing. Method
    names below are restored to distinct unittest-style names.
    """

    # Image processor class backing the processor under test; None without vision deps.
    image_processor_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        # NOTE(review): ``image_processor_tester`` is never assigned in this class,
        # so accessing this property raises AttributeError. Kept byte-for-byte for
        # interface parity — confirm against the upstream test suite.
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        """Write a character vocab and an image-processor config into a temp dir."""
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')

        image_processor_map = {
            'do_normalize': False,
            'do_resize': True,
            'image_processor_type': 'ViTImageProcessor',
            'resample': 3,
            'size': {'height': 32, 'width': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        """Tokenizer loaded from the temp dir written in ``setUp``."""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        """Image processor loaded from the temp dir written in ``setUp``."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Single random PIL image used as pixel input."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        # Both paths must produce (numerically) the same pixel tensors.
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'test'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'test'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'labels'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        # The tokenizer inserts spaces between characters; strip them for comparison.
        decode_strs = [seq.replace(' ', '') for seq in decoded_tok]

        self.assertListEqual(decode_strs, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        # Fake logits for the char / bpe / wordpiece heads.
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds']
        )
| 319 | 1 |
"""Vigenère-style cipher demo: key generation, encryption and decryption."""
from string import ascii_uppercase

# Letter -> index (A=0 .. Z=25) and the inverse index -> letter table.
# (The generated code bound both tables to one reused name and the functions
# referenced an undefined `dicta`; names restored so the module runs.)
dict_char_to_num = {char: i for i, char in enumerate(ascii_uppercase)}
dict_num_to_char = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat *key* until it is exactly as long as *message* and return it."""
    x = len(message)
    i = 0
    while True:
        # Wrap the source index if it ever reaches the message length.
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt *message* with the length-matched key; spaces pass through unchanged."""
    cipher = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            # Shift each letter backwards by the corresponding key letter, mod 26.
            x = (dict_char_to_num[letter] - dict_char_to_num[key_new[i]]) % 26
            i += 1
            cipher += dict_num_to_char[x]
    return cipher


def original_text(cipher_text: str, key_new: str) -> str:
    """Invert ``cipher_text``: shift each letter forward by the key letter, mod 26."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict_char_to_num[letter] + dict_char_to_num[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict_num_to_char[x]
    return or_txt


def main() -> None:
    """Round-trip the demo message and print both forms."""
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 222 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowerCamelCase :Tuple = logging.get_logger(__name__)
class A__(SequenceFeatureExtractor):
    r"""
    Whisper-style log-mel feature extractor: pads/truncates raw audio to
    ``chunk_length`` seconds and converts it into a log-mel spectrogram.

    NOTE(review): the generated source inherited from an undefined name and
    reused a single identifier for every method and parameter (duplicate
    parameters are a SyntaxError). Names below are restored from the helpers
    the body actually calls — verify against the upstream extractor.
    """

    # Tensor names produced by __call__; read by SequenceFeatureExtractor.pad.
    model_input_names = ['input_features']

    def __init__(
        self,
        feature_size: int = 80,
        sampling_rate: int = 16_000,
        hop_length: int = 160,
        chunk_length: int = 30,
        n_fft: int = 400,
        padding_value: float = 0.0,
        return_attention_mask: bool = False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        # Number of raw samples in one fixed-size chunk, and the frame count it yields.
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        # Slaney-normalized mel filter bank applied to the power spectrogram.
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute the clipped, scaled log10 mel spectrogram of one waveform."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        # Drop the last frame, clip to 8 dB below the max, rescale to roughly [-1, 1].
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Normalize each vector to zero mean / unit variance over its unpadded length."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    # Overwrite the padded tail with the padding value.
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        """Pad/truncate ``raw_speech`` and return its log-mel ``input_features``."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    f' was sampled with {self.sampling_rate} and not {sampling_rate}.'
                )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug."""
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            # NOTE(review): dtype literals were garbled in the generated source;
            # float64 -> float32 downcast restored from the upstream extractor — confirm.
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"""input_features""": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["""input_features"""] = self.zero_mean_unit_var_norm(
                padded_inputs["""input_features"""],
                attention_mask=padded_inputs["""attention_mask"""],
                padding_value=self.padding_value,
            )
            padded_inputs["""input_features"""] = np.stack(padded_inputs["""input_features"""], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("""input_features""").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["""input_features"""] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["""input_features"""] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["""attention_mask"""] = padded_inputs["""attention_mask"""][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the extractor config, dropping the large mel filter bank."""
        output = copy.deepcopy(self.__dict__)
        # NOTE(review): destination key restored by convention — confirm upstream.
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
| 222 | 1 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """
    Remove ``n_shave_prefix_segments`` dot-separated segments from the start of
    *path* (or, when negative, that many segments from the end) and return it.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Map LDM resnet parameter names onto their diffusers equivalents.

    Returns a list of ``{"old": ..., "new": ...}`` rename records.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")

        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")

        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        # Optionally drop leading path segments (e.g. the "input_blocks.N." prefix).
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Map LDM attention parameter names onto their diffusers equivalents.

    Returns a list of ``{"old": ..., "new": ...}`` rename records.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        # Optionally drop leading path segments of the renamed key.
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping
def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """
    Copy tensors from ``old_checkpoint`` into ``checkpoint`` under renamed keys.

    ``paths`` is a list of ``{"old": ..., "new": ...}`` records (see the
    ``renew_*_paths`` helpers). ``attention_paths_to_split`` maps a fused
    qkv key to the three destination keys it should be split into.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            # Regroup the fused qkv tensor per head, then split it into q / k / v.
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
def convert_ldm_checkpoint(checkpoint, config):
    """
    Convert an LDM/CompVis UNet ``state_dict`` into the diffusers UNet layout
    and return the new state dict.

    NOTE(review): the generated source assigned every converted tensor to a
    throwaway local and returned the undefined ``new_checkpoint``; destination
    key names below are restored from the diffusers LDM conversion layout —
    verify against the target model class before shipping.
    """
    new_checkpoint = {}

    # Time embedding MLP.
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    # Stem convolution and final norm/conv.
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
        for layer_id in range(num_output_blocks)
    }

    # Down blocks (input_blocks.0 is the stem conv handled above).
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
        attentions = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]

        # A block holding only an "op" conv is a downsampler, not a resnet.
        if f'input_blocks.{i}.0.op.weight' in checkpoint:
            new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.weight'] = checkpoint[
                f'input_blocks.{i}.0.op.weight'
            ]
            new_checkpoint[f'down_blocks.{block_id}.downsamplers.0.conv.bias'] = checkpoint[
                f'input_blocks.{i}.0.op.bias'
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f'input_blocks.{i}.0', "new": f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f'input_blocks.{i}.1',
                "new": f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
            }
            to_split = {
                f'input_blocks.{i}.1.qkv.bias': {
                    "key": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
                    "query": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
                    "value": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
                },
                f'input_blocks.{i}.1.qkv.weight': {
                    "key": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
                    "query": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
                    "value": f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    # Mid block: resnet / attention / resnet.
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    # Up blocks.
    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        # Group the per-layer key suffixes by their sub-block index.
        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
            attentions = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f'output_blocks.{i}.0', "new": f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.weight'] = checkpoint[
                    f'output_blocks.{i}.{index}.conv.weight'
                ]
                new_checkpoint[f'up_blocks.{block_id}.upsamplers.0.conv.bias'] = checkpoint[
                    f'output_blocks.{i}.{index}.conv.bias'
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f'output_blocks.{i}.1',
                    "new": f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
                }
                to_split = {
                    f'output_blocks.{i}.1.qkv.bias': {
                        "key": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
                        "query": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
                        "value": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
                    },
                    f'output_blocks.{i}.1.qkv.weight': {
                        "key": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
                        "query": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
                        "value": f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            # Plain resnet-only output block: rename key-by-key.
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )

    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the architecture.',
    )

    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')

    # (The generated code discarded parse_args()'s result and then referenced
    # an undefined `args`, raising NameError at runtime.)
    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    # NOTE(review): class name as imported at the top of this file; presumably
    # UNet2DModel upstream — confirm against the diffusers version in use.
    model = UNetaDModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        # Build a full LDM pipeline when scheduler/VQ-VAE configs sit next to the checkpoint.
        scheduler = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
        vqvae = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        # Best effort: fall back to saving just the UNet weights.
        model.save_pretrained(args.dump_path)
import re
import string
import numpy as np
import datasets
lowercase : List[str] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
lowercase : List[str] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match 
= datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
lowercase : List[str] = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCamelCase__(datasets.Metric):
    """Exact-match metric: percentage of predictions equal to their reference string.

    NOTE(review): the generated source named both methods ``_lowerCamelCase``
    (the first was shadowed) and gave ``_compute`` six parameters all named
    ``a`` — a SyntaxError. Names restored to the ``datasets.Metric`` contract.
    """

    def _info(self):
        # Metric metadata consumed by the ``datasets`` framework.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Return ``{"exact_match": rate}`` with ``rate`` in [0.0, 100.0]."""
        if regexes_to_ignore is not None:
            # Strip every ignored pattern from both sides before comparing.
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure: maps submodule name -> public names it exports.
# (The generated code bound this dict and the tokenizer export lists to
# throwaway names, and never installed the lazy module in sys.modules.)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

# The slow (sentencepiece-based) tokenizer is only exported when sentencepiece is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

# The fast tokenizer is only exported when the tokenizers library is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 4 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
A : Any = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[Any]:
for pegasus_name, hf_name in PATTERNS:
_lowercase = k.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return k
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ : dict , SCREAMING_SNAKE_CASE_ : dict ) -> PegasusForConditionalGeneration:
_lowercase = DEFAULTS.copy()
cfg_kwargs.update(SCREAMING_SNAKE_CASE_ )
_lowercase = PegasusConfig(**SCREAMING_SNAKE_CASE_ )
_lowercase = PegasusForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
_lowercase = torch_model.model.state_dict()
_lowercase = {}
for k, v in tf_weights.items():
_lowercase = rename_state_dict_key(SCREAMING_SNAKE_CASE_ )
if new_k not in sd:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
_lowercase = v.T
_lowercase = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
_lowercase = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
_lowercase = mapping["""shared.weight"""]
_lowercase = mapping["""shared.weight"""]
_lowercase = {k: torch.zeros_like(SCREAMING_SNAKE_CASE_ ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**SCREAMING_SNAKE_CASE_ )
_lowercase , _lowercase = torch_model.model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
_lowercase = [
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ : Optional[Any]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
_lowercase = tf.train.list_variables(SCREAMING_SNAKE_CASE_ )
_lowercase = {}
_lowercase = ["""Adafactor""", """global_step"""]
for name, shape in tqdm(SCREAMING_SNAKE_CASE_ , desc="""converting tf checkpoint to dict""" ):
_lowercase = any(pat in name for pat in ignore_name )
if skip_key:
continue
_lowercase = tf.train.load_variable(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_lowercase = array
return tf_weights
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ) -> str:
# save tokenizer first
_lowercase = Path(SCREAMING_SNAKE_CASE_ ).parent.name
_lowercase = task_specific_params[f"""summarization_{dataset}"""]["""max_position_embeddings"""]
_lowercase = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=SCREAMING_SNAKE_CASE_ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(SCREAMING_SNAKE_CASE_ )
# convert model
_lowercase = get_tf_weights_as_numpy(SCREAMING_SNAKE_CASE_ )
_lowercase = task_specific_params[f"""summarization_{dataset}"""]
if dataset == "large":
_lowercase = task_specific_params
_lowercase = convert_pegasus(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
torch_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
_lowercase = torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(SCREAMING_SNAKE_CASE_ , Path(SCREAMING_SNAKE_CASE_ ) / """pytorch_model.bin""" )
if __name__ == "__main__":
A : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
A : str = parser.parse_args()
if args.save_dir is None:
A : Tuple = Path(args.tf_ckpt_path).parent.name
A : List[Any] = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir) | 287 | 0 |
"""simple docstring"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class _snake_case ( __snake_case ):
"""simple docstring"""
a = ["image_processor"]
a = "SamImageProcessor"
def __init__( self : Dict , _A : Any):
"""simple docstring"""
super().__init__(_A)
_SCREAMING_SNAKE_CASE : Tuple = self.image_processor
_SCREAMING_SNAKE_CASE : Optional[int] = -1_0
_SCREAMING_SNAKE_CASE : str = self.image_processor.size["""longest_edge"""]
def __call__( self : Union[str, Any] , _A : Tuple=None , _A : List[str]=None , _A : Optional[int]=None , _A : Tuple=None , _A : Optional[Union[str, TensorType]] = None , **_A : int , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = self.image_processor(
_A , return_tensors=_A , **_A , )
# pop arguments that are not used in the foward but used nevertheless
_SCREAMING_SNAKE_CASE : Optional[int] = encoding_image_processor["""original_sizes"""]
if hasattr(_A , """numpy"""): # Checks if Torch or TF tensor
_SCREAMING_SNAKE_CASE : Optional[Any] = original_sizes.numpy()
_SCREAMING_SNAKE_CASE : Optional[Any] = self._check_and_preprocess_points(
input_points=_A , input_labels=_A , input_boxes=_A , )
_SCREAMING_SNAKE_CASE : List[Any] = self._normalize_and_convert(
_A , _A , input_points=_A , input_labels=_A , input_boxes=_A , return_tensors=_A , )
return encoding_image_processor
def _lowerCAmelCase ( self : List[str] , _A : Optional[Any] , _A : Tuple , _A : Union[str, Any]=None , _A : Tuple=None , _A : int=None , _A : List[str]="pt" , ):
"""simple docstring"""
if input_points is not None:
if len(_A) != len(_A):
_SCREAMING_SNAKE_CASE : List[str] = [
self._normalize_coordinates(self.target_size , _A , original_sizes[0]) for point in input_points
]
else:
_SCREAMING_SNAKE_CASE : List[Any] = [
self._normalize_coordinates(self.target_size , _A , _A)
for point, original_size in zip(_A , _A)
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points):
if input_labels is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self._pad_points_and_labels(_A , _A)
_SCREAMING_SNAKE_CASE : List[str] = np.array(_A)
if input_labels is not None:
_SCREAMING_SNAKE_CASE : Any = np.array(_A)
if input_boxes is not None:
if len(_A) != len(_A):
_SCREAMING_SNAKE_CASE : Optional[int] = [
self._normalize_coordinates(self.target_size , _A , original_sizes[0] , is_bounding_box=_A)
for box in input_boxes
]
else:
_SCREAMING_SNAKE_CASE : List[Any] = [
self._normalize_coordinates(self.target_size , _A , _A , is_bounding_box=_A)
for box, original_size in zip(_A , _A)
]
_SCREAMING_SNAKE_CASE : Any = np.array(_A)
if input_boxes is not None:
if return_tensors == "pt":
_SCREAMING_SNAKE_CASE : Optional[int] = torch.from_numpy(_A)
# boxes batch size of 1 by default
_SCREAMING_SNAKE_CASE : Dict = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
elif return_tensors == "tf":
_SCREAMING_SNAKE_CASE : List[str] = tf.convert_to_tensor(_A)
# boxes batch size of 1 by default
_SCREAMING_SNAKE_CASE : int = tf.expand_dims(_A , 1) if len(input_boxes.shape) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes})
if input_points is not None:
if return_tensors == "pt":
_SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(_A)
# point batch size of 1 by default
_SCREAMING_SNAKE_CASE : str = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
elif return_tensors == "tf":
_SCREAMING_SNAKE_CASE : List[Any] = tf.convert_to_tensor(_A)
# point batch size of 1 by default
_SCREAMING_SNAKE_CASE : int = tf.expand_dims(_A , 1) if len(input_points.shape) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points})
if input_labels is not None:
if return_tensors == "pt":
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.from_numpy(_A)
# point batch size of 1 by default
_SCREAMING_SNAKE_CASE : Optional[int] = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
elif return_tensors == "tf":
_SCREAMING_SNAKE_CASE : Tuple = tf.convert_to_tensor(_A)
# point batch size of 1 by default
_SCREAMING_SNAKE_CASE : Optional[Any] = tf.expand_dims(_A , 1) if len(input_labels.shape) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels})
return encoding_image_processor
def _lowerCAmelCase ( self : List[Any] , _A : Dict , _A : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = max([point.shape[0] for point in input_points])
_SCREAMING_SNAKE_CASE : Any = []
for i, point in enumerate(_A):
if point.shape[0] != expected_nb_points:
_SCREAMING_SNAKE_CASE : Optional[int] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value] , axis=0)
_SCREAMING_SNAKE_CASE : Optional[Any] = np.append(input_labels[i] , [self.point_pad_value])
processed_input_points.append(_A)
_SCREAMING_SNAKE_CASE : Optional[Any] = processed_input_points
return input_points, input_labels
def _lowerCAmelCase ( self : Optional[int] , _A : int , _A : np.ndarray , _A : Union[str, Any] , _A : Tuple=False):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = original_size
_SCREAMING_SNAKE_CASE : Any = self.image_processor._get_preprocess_shape(_A , longest_edge=_A)
_SCREAMING_SNAKE_CASE : Dict = deepcopy(_A).astype(_A)
if is_bounding_box:
_SCREAMING_SNAKE_CASE : Optional[Any] = coords.reshape(-1 , 2 , 2)
_SCREAMING_SNAKE_CASE : str = coords[..., 0] * (new_w / old_w)
_SCREAMING_SNAKE_CASE : List[Any] = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_SCREAMING_SNAKE_CASE : List[str] = coords.reshape(-1 , 4)
return coords
def _lowerCAmelCase ( self : Union[str, Any] , _A : Dict=None , _A : Dict=None , _A : List[str]=None , ):
"""simple docstring"""
if input_points is not None:
if hasattr(_A , """numpy"""): # Checks for TF or Torch tensor
_SCREAMING_SNAKE_CASE : Tuple = input_points.numpy().tolist()
if not isinstance(_A , _A) or not isinstance(input_points[0] , _A):
raise ValueError("""Input points must be a list of list of floating points.""")
_SCREAMING_SNAKE_CASE : str = [np.array(_A) for input_point in input_points]
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = None
if input_labels is not None:
if hasattr(_A , """numpy"""):
_SCREAMING_SNAKE_CASE : Optional[Any] = input_labels.numpy().tolist()
if not isinstance(_A , _A) or not isinstance(input_labels[0] , _A):
raise ValueError("""Input labels must be a list of list integers.""")
_SCREAMING_SNAKE_CASE : Optional[Any] = [np.array(_A) for label in input_labels]
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = None
if input_boxes is not None:
if hasattr(_A , """numpy"""):
_SCREAMING_SNAKE_CASE : Union[str, Any] = input_boxes.numpy().tolist()
if (
not isinstance(_A , _A)
or not isinstance(input_boxes[0] , _A)
or not isinstance(input_boxes[0][0] , _A)
):
raise ValueError("""Input boxes must be a list of list of list of floating points.""")
_SCREAMING_SNAKE_CASE : Optional[int] = [np.array(_A).astype(np.floataa) for box in input_boxes]
else:
_SCREAMING_SNAKE_CASE : Any = None
return input_points, input_labels, input_boxes
@property
def _lowerCAmelCase ( self : Dict):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = self.image_processor.model_input_names
return list(dict.fromkeys(_A))
def _lowerCAmelCase ( self : Optional[Any] , *_A : str , **_A : Optional[int]):
"""simple docstring"""
return self.image_processor.post_process_masks(*_A , **_A)
| 712 | """simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]:
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
_SCREAMING_SNAKE_CASE : Optional[int] = TapasConfig.from_json_file(__SCREAMING_SNAKE_CASE )
# set absolute/relative position embeddings parameter
_SCREAMING_SNAKE_CASE : Dict = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
_SCREAMING_SNAKE_CASE : str = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
elif task == "WTQ":
# run_task_main.py hparams
_SCREAMING_SNAKE_CASE : Optional[int] = 4
_SCREAMING_SNAKE_CASE : Any = True
# hparam_utils.py hparams
_SCREAMING_SNAKE_CASE : Any = 0.66_46_94
_SCREAMING_SNAKE_CASE : str = 0.20_79_51
_SCREAMING_SNAKE_CASE : str = 0.12_11_94
_SCREAMING_SNAKE_CASE : List[Any] = True
_SCREAMING_SNAKE_CASE : str = True
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
_SCREAMING_SNAKE_CASE : Optional[Any] = 0.0_35_25_13
_SCREAMING_SNAKE_CASE : Optional[Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
_SCREAMING_SNAKE_CASE : int = 4
_SCREAMING_SNAKE_CASE : Tuple = False
# hparam_utils.py hparams
_SCREAMING_SNAKE_CASE : Any = 36.45_19
_SCREAMING_SNAKE_CASE : Union[str, Any] = 0.90_34_21
_SCREAMING_SNAKE_CASE : Optional[Any] = 2_22.0_88
_SCREAMING_SNAKE_CASE : Any = True
_SCREAMING_SNAKE_CASE : str = True
_SCREAMING_SNAKE_CASE : Optional[int] = True
_SCREAMING_SNAKE_CASE : Dict = 0.76_31_41
_SCREAMING_SNAKE_CASE : Union[str, Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
elif task == "TABFACT":
_SCREAMING_SNAKE_CASE : int = TapasForSequenceClassification(config=__SCREAMING_SNAKE_CASE )
elif task == "MLM":
_SCREAMING_SNAKE_CASE : int = TapasForMaskedLM(config=__SCREAMING_SNAKE_CASE )
elif task == "INTERMEDIATE_PRETRAINING":
_SCREAMING_SNAKE_CASE : int = TapasModel(config=__SCREAMING_SNAKE_CASE )
else:
raise ValueError(F"""Task {task} not supported.""" )
print(F"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save pytorch-model (weights and configuration)
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
# Save tokenizer files
print(F"""Save tokenizer files to {pytorch_dump_path}""" )
_SCREAMING_SNAKE_CASE : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 )
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. Defaults to SQA.'''
)
parser.add_argument(
'''--reset_position_index_per_cell''',
default=False,
action='''store_true''',
help='''Whether to use relative position embeddings or not. Defaults to True.''',
)
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--tapas_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained TAPAS model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 635 | 0 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_lowerCAmelCase = None
_lowerCAmelCase = """<""" if sys.byteorder == """little""" else """>"""
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_lowerCAmelCase = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class _UpperCAmelCase :
a = True
a = None
# Automatically constructed
a = "PIL.Image.Image"
a = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
a = field(default='''Image''' , init=_lowerCamelCase , repr=_lowerCamelCase )
def __call__( self ):
return self.pa_type
def _lowerCamelCase ( self , a__ ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(a__ , a__ ):
A_ : str = np.array(a__ )
if isinstance(a__ , a__ ):
return {"path": value, "bytes": None}
elif isinstance(a__ , a__ ):
return {"path": None, "bytes": value}
elif isinstance(a__ , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a__ )
elif isinstance(a__ , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a__ )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def _lowerCamelCase ( self , a__ , a__=None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
A_ : Optional[int] = {}
A_ , A_ : Any = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
else:
if is_local_path(a__ ):
A_ : List[str] = PIL.Image.open(a__ )
else:
A_ : Union[str, Any] = path.split("""::""" )[-1]
try:
A_ : Union[str, Any] = string_to_dict(a__ , config.HUB_DATASETS_URL )["""repo_id"""]
A_ : Tuple = token_per_repo_id.get(a__ )
except ValueError:
A_ : int = None
with xopen(a__ , """rb""" , use_auth_token=a__ ) as f:
A_ : List[Any] = BytesIO(f.read() )
A_ : List[str] = PIL.Image.open(bytes_ )
else:
A_ : List[str] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def _lowerCamelCase ( self ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def _lowerCamelCase ( self , a__ ):
if pa.types.is_string(storage.type ):
A_ : Optional[int] = pa.array([None] * len(a__ ) , type=pa.binary() )
A_ : int = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
A_ : Tuple = pa.array([None] * len(a__ ) , type=pa.string() )
A_ : List[Any] = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
A_ : Optional[int] = storage.field("""bytes""" )
else:
A_ : Union[str, Any] = pa.array([None] * len(a__ ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
A_ : int = storage.field("""path""" )
else:
A_ : str = pa.array([None] * len(a__ ) , type=pa.string() )
A_ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
A_ : Union[str, Any] = pa.array(
[encode_np_array(np.array(a__ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
A_ : Dict = pa.array([None] * len(a__ ) , type=pa.string() )
A_ : Optional[Any] = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(a__ , self.pa_type )
def _lowerCamelCase ( self , a__ ):
@no_op_if_value_is_null
def path_to_bytes(a__ ):
with xopen(a__ , """rb""" ) as f:
A_ : int = f.read()
return bytes_
A_ : Optional[int] = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
A_ : Dict = pa.array(
[os.path.basename(a__ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
A_ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(a__ , self.pa_type )
def _lowerCAmelCase ( ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
A_ : List[Any] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def _lowerCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
A_ : List[Any] = BytesIO()
if image.format in list_image_compression_formats():
A_ : int = image.format
else:
A_ : Union[str, Any] = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(_lowerCAmelCase ,format=_lowerCAmelCase )
return buffer.getvalue()
def _lowerCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
if hasattr(_lowerCAmelCase ,"""filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_lowerCAmelCase )}
def _lowerCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
A_ : List[Any] = array.dtype
A_ : Tuple = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
A_ : Any = dtype.kind
A_ : List[Any] = dtype.itemsize
A_ : Union[str, Any] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
A_ : Dict = np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
if dtype is not dest_dtype:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
A_ : Tuple = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
A_ : Optional[Any] = dtype_byteorder + dtype_kind + str(_lowerCAmelCase )
A_ : List[Any] = np.dtype(_lowerCAmelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
A_ : Optional[Any] = PIL.Image.fromarray(array.astype(_lowerCAmelCase ) )
return {"path": None, "bytes": image_to_bytes(_lowerCAmelCase )}
def _lowerCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if objs:
A_ , A_ : Tuple = first_non_null_value(_lowerCAmelCase )
if isinstance(_lowerCAmelCase ,_lowerCAmelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_lowerCAmelCase ,np.ndarray ):
A_ : int = no_op_if_value_is_null(_lowerCAmelCase )
return [obj_to_image_dict_func(_lowerCAmelCase ) for obj in objs]
elif isinstance(_lowerCAmelCase ,PIL.Image.Image ):
A_ : Optional[Any] = no_op_if_value_is_null(_lowerCAmelCase )
return [obj_to_image_dict_func(_lowerCAmelCase ) for obj in objs]
else:
return objs
else:
return objs
| 569 |
from ...configuration_utils import PretrainedConfig
class _UpperCAmelCase ( _lowerCamelCase ):
a = '''bert-generation'''
def __init__( self , a__=50358 , a__=1024 , a__=24 , a__=16 , a__=4096 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=0.02 , a__=1E-12 , a__=0 , a__=2 , a__=1 , a__="absolute" , a__=True , **a__ , ):
super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ )
A_ : List[str] = vocab_size
A_ : int = hidden_size
A_ : List[str] = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : Optional[int] = hidden_act
A_ : Optional[int] = intermediate_size
A_ : List[Any] = hidden_dropout_prob
A_ : int = attention_probs_dropout_prob
A_ : List[str] = max_position_embeddings
A_ : Optional[Any] = initializer_range
A_ : str = layer_norm_eps
A_ : str = position_embedding_type
A_ : List[Any] = use_cache
| 569 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Map of submodule name -> list of public names it exports.  Consumed by
# _LazyModule below so heavy submodules are only imported on first access.
# (Fix: the original rebound three unrelated values to one throwaway name and
# never defined `_import_structure`, so the _LazyModule call raised NameError.)
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

# Tokenizer entry is optional: only registered when the `tokenizers` backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

# Model entries are optional: only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy proxy is used.
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy in place of this module (fix: the original
    # discarded the _LazyModule instance into an unused variable).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import itertools
import random
import unittest

import numpy as np

from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

# Shared module-level RNG so the data helpers below are cheap and reuse one
# generator.  (Fix: removed a stray "| 708 |" residue token that made the file
# unparseable, and bound the RNG to the name `global_rng` that the helper
# function actually reads; it was previously assigned to a throwaway name.)
global_rng = random.Random()
def _a ( lowercase__ : List[str] , lowercase__ : List[Any]=1.0 , lowercase__ : Optional[int]=None , lowercase__ : List[str]=None ):
'''simple docstring'''
if rng is None:
SCREAMING_SNAKE_CASE__ : Optional[int] = global_rng
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class snake_case ( unittest.TestCase ):
    """Factory of configs and synthetic inputs for SpeechT5 feature-extractor tests.

    Stores the feature-extractor hyper-parameters and builds random raw-audio
    batches (``prepare_inputs_for_common``) and mel-spectrogram target batches
    (``prepare_inputs_for_target``) whose lengths increase across the batch.

    Fixes vs. the obfuscated original: the ``__init__`` parameters all shared
    one name (SyntaxError) and are reconstructed from the attribute
    assignments; the three helper methods were all named ``__lowercase`` (so
    two were shadowed) and are restored to the names the sibling test class
    calls (e.g. ``prepare_feat_extract_dict``); data generation now calls the
    module helper ``_a`` instead of the undefined name ``floats_list``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step so that `batch_size` inputs span [min_seq_length, max_seq_length).
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        """Return the kwargs dict used to instantiate the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Create a batch of raw-audio inputs (flat lists of floats).

        Args:
            equal_length: when True, every input has ``max_seq_length`` samples;
                otherwise lengths increase by ``seq_length_diff`` across the batch.
            numpify: when True, convert each input to a ``np.ndarray``.
        """

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = _a((self.batch_size, self.max_seq_length))
        else:
            # Inputs of strictly increasing size, flattened to 1-D per sample.
            speech_inputs = [
                _flatten(_a((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        """Create a batch of target mel-spectrogram inputs (2-D: frames x mel bins).

        Args:
            equal_length: when True, every input has ``max_seq_length`` frames;
                otherwise frame counts increase by ``seq_length_diff`` across the batch.
            numpify: when True, convert each input to a ``np.ndarray``.
        """
        if equal_length:
            speech_inputs = [_a((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # Inputs of strictly increasing frame count.
            speech_inputs = [
                _a((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(UpperCamelCase_, unittest.TestCase):
    """Tests for the SpeechT5 feature extractor (waveform and mel-target paths).

    The obfuscated original bound every intermediate to a placeholder name
    while later statements referenced the real names (``feat_extract``,
    ``input_values``, ...), and every method was called ``__lowercase`` so
    only the last definition survived. Names are restored from the uses and
    test methods renamed ``test_*`` so unittest discovers them.
    NOTE(review): the base ``UpperCamelCase_`` is presumably the sequence
    feature-extraction test mixin imported above — confirm against the
    file header.
    """

    # referenced by the tests as self.feature_extraction_class
    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        # normalized features should have ~zero mean and ~unit variance
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            # BUGFIX: checked sample 0's tail twice; the padded tail of
            # sample 1 (real length 1000) is what should be ~zero here
            self.assertTrue(input_values[1][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        # float64 inputs must be downcast to float32 by pad()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=np_speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        # assertEqual, not the deprecated assertEquals
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 636 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    """Builds tiny OpenAI-GPT configs/inputs and checks each model head.

    Renamed from the obfuscated placeholder: the test class instantiates
    ``OpenAIGPTModelTester(self)`` by name. Constructor attributes and the
    helper-method names are restored from their call sites.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # last vocab id doubles as the padding token
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/generation/pipeline tests for OpenAI-GPT.

    Base mixins restored from this file's imports (the obfuscated bases were
    undefined placeholders) and the class attributes renamed to the names
    the mixins read (``all_model_classes`` etc.).
    """

    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                # NOTE(review): key names for the two extra zero tensors are
                # reconstructed as mc_token_ids/mc_labels (the double-heads
                # model's extra inputs) — confirm against the model signature.
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration test: greedy generation from the openai-gpt checkpoint."""

    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        # greedy decoding: the expected ids above are only reproducible with
        # do_sample=False (the obfuscated value was an undefined placeholder)
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 180 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# checkpoint name -> config URL; the obfuscation reused a single throwaway
# name for both this dict and the logger above, so the dict clobbered the
# logger — give each binding its own name
INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    """Configuration class for the Informer time-series transformer.

    The obfuscated ``__init__`` repeated one parameter name for every
    argument (a SyntaxError) and bound arguments to throwaway locals;
    parameter names are reconstructed from the attribute names the body
    reads (``self.cardinality``, ``self.lags_sequence``, ...) and from the
    standard Informer configuration surface, and the ``self.*`` attribute
    assignments are restored.
    """

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        # referenced in __init__ when computing feature_size
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 180 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    """Slow integration test for the Flax XLM-RoBERTa base checkpoint."""

    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 213 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the ONNX Stable-Diffusion x4 upscale pipeline.

    The mixin base is restored from this file's imports, and the class
    attribute renamed ``hub_checkpoint`` (the tests read
    ``self.hub_checkpoint``).
    """

    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        # the obfuscated original dropped the scheduler into a local; it must
        # be installed on the pipeline for the expected slice to match
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __A(unittest.TestCase):
    """Nightly GPU integration tests for ``OnnxStableDiffusionUpscalePipeline``.

    NOTE(review): the scrambled original assigned everything to ``__magic_name__``
    and referenced ``options``/``init_image``/``pipe``/``prompt``/``generator``,
    which were undefined; the properties were also defined under scrambled names
    while the tests call ``self.gpu_provider`` / ``self.gpu_options``. Restored.
    """

    @property
    def gpu_provider(self):
        # ONNX Runtime provider tuple: CUDA EP with a capped memory arena so the
        # test does not monopolize the GPU.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # NOTE(review): assumed to be `enable_mem_pattern = False` as in the
        # upstream diffusers ONNX tests — confirm against the original file.
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 213 | 1 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:
    class Image:
        """Minimal stand-in so references to ``Image`` resolve without Pillow.

        The vision tests themselves are gated behind ``@require_vision``, so
        this ``open`` is never actually called.
        """

        @staticmethod
        def open(*args, **kwargs):
            # The original scrambled signature (*_a, **_a) is a SyntaxError
            # (duplicate argument name); fixed to the conventional form.
            pass
def UpperCAmelCase_(__a: "Image") -> str:
    """Return the MD5 hex digest of an image's raw bytes.

    Used as a stable fingerprint for pipeline outputs in tests.

    Fixes over the original:
    - ``hashlib.mda`` is not a real function — it is a typo for ``hashlib.md5``.
    - The hash object was assigned to ``_a`` but read back as ``m`` (NameError),
      and the argument was referenced as ``image`` instead of ``__a``.
    - The ``Image`` annotation is quoted so the function still imports when
      Pillow is absent (the annotation would otherwise be evaluated eagerly).
    """
    m = hashlib.md5(__a.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    """Pipeline tests for ``DepthEstimationPipeline``.

    NOTE(review): the scrambled original used duplicate ``_a`` parameters
    (a SyntaxError), referenced undefined names (``depth_estimator``,
    ``outputs``, ``hashimage``), and hid the common-test hooks
    (``model_mapping`` / ``get_test_pipeline`` / ``run_pipeline_test``) behind
    scrambled identifiers the harness cannot find. All are restored here; the
    class is also renamed so it no longer collides with the other scrambled
    ``UpperCAmelCase__`` classes in this file.
    """

    # Hook consumed by the shared pipeline-test harness.
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build the pipeline under test plus example inputs for the harness."""
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        """Exercise single-image and batched calls, covering RGBA/LA/L inputs."""
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        # Replace the PIL image with its fingerprint (see UpperCAmelCase_/hashimage above).
        outputs["depth"] = UpperCAmelCase_(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # No tiny checkpoint exists for GLPN or DPT, so this cannot run yet.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 229 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
# Make torch/cuda ops deterministic so the hard-coded image slices below are reproducible.
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    """Fast CPU tests for ``StableDiffusionLDMaDPipeline`` on tiny dummy models.

    NOTE(review): the scrambled original defined every method as ``__lowercase``
    while the tests call ``self.get_dummy_components()`` / ``self.get_dummy_inputs()``
    (NameError), used duplicate ``_a`` parameters (SyntaxError), dropped all
    intermediate names (``unet``/``vae``/``rgb``/``depth`` undefined), and the
    prompt-embeds test compared a slice with itself. All restored here; harness
    attribute names are assumed from the upstream diffusers test — confirm.
    """

    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny UNet/VAE/CLIP components for fast, deterministic tests."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        # 6 channels: RGB + depth are stacked in the LDM3D latent space.
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Standard call kwargs; the generator kind depends on the device (mps quirk)."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        """A string prompt and its precomputed embeddings must give identical outputs."""
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward with a plain string prompt
        output_1 = ldmad_pipe(**inputs)
        rgb_slice_1 = output_1.rgb[0, -3:, -3:, -1]
        depth_slice_1 = output_1.depth[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = ldmad_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldmad_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = ldmad_pipe.text_encoder(text_input_ids)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward with precomputed prompt embeddings — must match the string path
        # (the original asserted a slice against itself, which is vacuous)
        output_2 = ldmad_pipe(**inputs)
        rgb_slice_2 = output_2.rgb[0, -3:, -3:, -1]
        depth_slice_2 = output_2.depth[0, -3:, -1]
        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        # NOTE(review): the scramble lost what the PNDM scheduler was assigned to;
        # assumed to replace the components' scheduler, as in the upstream test.
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldmad_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    """Slow GPU integration test for ``StableDiffusionLDMaDPipeline`` (Intel/ldm3d).

    NOTE(review): restored from scrambled names — ``tearDown``/``get_inputs``
    were renamed to ``__lowercase`` (so unittest never called them), the
    ``get_inputs`` signature had four duplicate ``_a`` parameters (SyntaxError)
    and ``torch.floataa`` (typo for ``torch.float32``), and all locals were
    collapsed into ``_a``. The class is renamed to avoid shadowing the fast
    test class above.
    """

    def tearDown(self):
        # Clean up after each test: free GPU memory between runs.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """Fixed latents + generator so the hard-coded slices below are reproducible."""
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        # NOTE(review): the original sliced `rgb` here too, but the value is
        # compared against the depth expectation — assumed to be `depth`.
        depth_slice = depth[0, -3:, -1].flatten()
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests(unittest.TestCase):
    """Nightly GPU statistics checks for the LDM3D pipelines (full 50-step runs).

    NOTE(review): restored from scrambled names — ``tearDown``/``get_inputs``
    were hidden behind ``__lowercase``, the ``get_inputs`` signature had
    duplicate ``_a`` parameters and the ``torch.floataa`` typo, and all locals
    were collapsed into ``_a``. Renamed to avoid shadowing the classes above.
    """

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """Same fixture as the slow tests but with the full 50 inference steps."""
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        # Compare whole-image statistics rather than pixel slices for the long run.
        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103
        # ldm3d-4c keeps a trailing channel dimension on the depth map.
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
| 229 | 1 |
'''simple docstring'''
def snake_case__(a) -> list[int]:
    """Sort *a* in place using exchange sort and return it.

    Exchange sort compares each element against every later element and swaps
    out-of-order pairs — O(n^2), fine for the small interactive inputs below.

    Fixes over the original:
    - ``range(a)`` iterated over the *list* (TypeError); the loops must range
      over ``len(a)``.
    - The swap assigned both values to a single throwaway name instead of
      writing them back into the list.
    """
    n = len(a)
    for i in range(n):
        for j in range(i + 1, n):
            if a[j] < a[i]:
                # tuple swap moves the smaller element into position i
                a[i], a[j] = a[j], a[i]
    return a
if __name__ == "__main__":
a__ = input('''Enter numbers separated by a comma:\n''').strip()
a__ = [int(item) for item in user_input.split(''',''')]
print(exchange_sort(unsorted)) | 719 |
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
# Module-level logger (transformers-style); used to report unreadable artifact zips.
a__ = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets) -> set:
    """Extract warnings matching *targets* from one downloaded artifact.

    *artifact_path* is either a ``.zip`` artifact or (when running from a
    GitHub Action, ``from_gh`` set in ``__main__``) a directory containing a
    ``warnings.txt`` file.

    Fixes over the original: both functions were defined as ``snake_case__``
    with duplicate ``a`` parameters (a SyntaxError), while their call sites
    used the names restored here; the inner parser also referenced ``fp``,
    ``buffer`` and ``selected_warnings``, which the scramble had erased.
    """
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        # Accumulate the indented body lines of each warning; flush on the
        # first non-indented line and keep the warning only if it matches
        # one of the target warning classes.
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets) -> set:
    """Extract target warnings from every artifact found under *artifact_dir*."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
def snake_case__ ( a ) -> int:
'''simple docstring'''
return values.split(""",""" )
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
a__ = parser.parse_args()
a__ = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
a__ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
a__ = extract_warnings(args.output_dir, args.targets)
a__ = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4) | 566 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.